diff options
Diffstat (limited to 'src/core/NEON/kernels/convolution')
68 files changed, 4856 insertions, 21054 deletions
diff --git a/src/core/NEON/kernels/convolution/common/padding.cpp b/src/core/NEON/kernels/convolution/common/padding.cpp index f57706fef6..5960e66968 100644 --- a/src/core/NEON/kernels/convolution/common/padding.cpp +++ b/src/core/NEON/kernels/convolution/common/padding.cpp @@ -81,7 +81,7 @@ template void copy_and_pad_tile( template void copy_and_pad_tile( unsigned int, unsigned int, unsigned int, - const float *, unsigned int, unsigned int, + float const *, unsigned int, unsigned int, float *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, float ); diff --git a/src/core/NEON/kernels/convolution/common/padding.hpp b/src/core/NEON/kernels/convolution/common/padding.hpp index b6f95872c0..397d902e29 100644 --- a/src/core/NEON/kernels/convolution/common/padding.hpp +++ b/src/core/NEON/kernels/convolution/common/padding.hpp @@ -34,20 +34,20 @@ namespace padding */ template <typename T> void copy_and_pad_tile( - unsigned int tile_rows, - unsigned int tile_cols, - unsigned int n_channels, - const T *inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - T* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride, - unsigned int pad_top, - unsigned int pad_left, - unsigned int pad_bottom, - unsigned int pad_right, - T pad_value=static_cast<T>(0) + const unsigned int tile_rows, + const unsigned int tile_cols, + const unsigned int n_channels, + const T * const inptr, + const unsigned int in_row_stride, + const unsigned int in_col_stride, + T* const outptr, + const unsigned int out_row_stride, + const unsigned int out_col_stride, + const unsigned int pad_top, + const unsigned int pad_left, + const unsigned int pad_bottom, + const unsigned int pad_right, + const T pad_value=static_cast<T>(0) ); /** Copy a tile and remove padding elements in the output. 
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise.hpp deleted file mode 100644 index 70d6689731..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise.hpp +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include <arm_neon.h> -#include "activation.hpp" -#include "padding.hpp" - -namespace depthwise -{ - -namespace nck = neon_convolution_kernels; - -class IDepthwiseConvolution -{ - public: - virtual ~IDepthwiseConvolution() = default; - - virtual int output_size( - int dim_size, - unsigned int padding_before, - unsigned int padding_after - ) const = 0; - - /* Set input tensor and stride. 
*/ - virtual void set_input(const void *inptr) = 0; - virtual void set_input(const void *inptr, int column_stride) = 0; - virtual void set_input(const void *inptr, int row_stride, int column_stride) = 0; - virtual void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) = 0; - - /* Set output tensor and stride. */ - virtual void set_output(void *outptr) = 0; - virtual void set_output(void *outptr, int column_stride) = 0; - virtual void set_output(void *outptr, int row_stride, int column_stride) = 0; - virtual void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) = 0; - - /* Weights and biases are re-ordered to improve memory access patterns. Use - * these methods to determine the size of the re-pack buffer and to set the - * address (and implicitly reorder the weights and biases into) the buffer. - */ - virtual size_t get_packed_params_size(void) const = 0; - virtual void set_packed_params_buffer(void *) = 0; - - virtual void pack_params(const void *weights, const void *biases=nullptr) const = 0; - virtual void pack_params(void *buffer, const void *weights, const void *biases=nullptr) const = 0; - virtual void pack_params( - void *buffer, - const void* weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases=nullptr - ) const = 0; - - /* Working space is used to pad tensors on the fly. Before running any - * inference check the amount of space required, allocate and provide a - * pointer to the convolution engine. 
- */ - virtual size_t get_working_space_size(unsigned int nthreads=1) const = 0; - virtual void set_working_space(void *) = 0; - - virtual unsigned int get_window(void) const = 0; - virtual void run( - unsigned int start, - unsigned int stop, - unsigned int threadid=0 - ) = 0; -}; - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename TIn, typename TBias, typename TOut, - typename Derived -> -class DepthwiseConvolutionBase : public IDepthwiseConvolution -{ - public: - // Information about the specific convolution instance - using InputType = TIn; - using BiasType = TBias; - using OutputType = TOut; - static constexpr int output_tile_rows = OutputTileRows; - static constexpr int output_tile_cols = OutputTileCols; - static constexpr int kernel_rows = KernelRows; - static constexpr int kernel_cols = KernelCols; - static constexpr int stride_rows = StrideRows; - static constexpr int stride_cols = StrideCols; - static constexpr int inner_tile_rows = stride_rows * (output_tile_rows - 1) + kernel_rows; - static constexpr int inner_tile_cols = stride_cols * (output_tile_cols - 1) + kernel_cols; - - /** Create a new depthwise convolution engine. - * - * @param[in] n_batches Number of batches tensors. - * @param[in] n_input_rows Number of rows in input tensor. - * @param[in] n_input_cols Number of columns in input tensor. - * @param[in] n_channels Number of channels in input and output tensors. - */ - DepthwiseConvolutionBase( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - /** Create a new depthwise convolution engine. - * - * @param[in] n_batches Number of batches tensors. - * @param[in] n_input_rows Number of rows in input tensor. 
- * @param[in] n_input_cols Number of columns in input tensor. - * @param[in] n_channels Number of channels in input and output tensors. - */ - DepthwiseConvolutionBase( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - // Cannot copy or move a DepthwiseConvolution. - DepthwiseConvolutionBase(DepthwiseConvolutionBase&) = delete; - DepthwiseConvolutionBase operator=(DepthwiseConvolutionBase&) = delete; - - /* Set input tensor and stride. */ - void set_input(const void *inptr) override; - void set_input(const void *inptr, int column_stride) override; - void set_input(const void *inptr, int row_stride, int column_stride) override; - void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) override; - - /* Set output tensor and stride. */ - void set_output(void *outptr) override; - void set_output(void *outptr, int column_stride) override; - void set_output(void *outptr, int row_stride, int column_stride) override; - void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) override; - - /** Get the number of output rows/columns. - * - * @param[in] dim_size Number of elements in the dimension (rows/columns) - * @param[in] same_padding True if the padding is SAME, otherwise false. - */ - static int get_output_size( - int dim_size, unsigned int padding_before, unsigned int padding_after - ); - - int output_size( - int dim_size, unsigned int padding_before, unsigned int padding_after - ) const override; - - /* Determine how much memory is required to store the packed weights and - * biases. - */ - size_t get_packed_params_size(void) const override; - - /* Set the buffer for the packed weights and biases, and perform the - * packing. 
- */ - void set_packed_params_buffer(void *buffer) override; - - void pack_params(const void *weights, const void *biases=nullptr) const override; - - void pack_params( - void *buffer, - const void *weights, - const void *biases=nullptr - ) const override; - - void pack_params( - void *buffer, - const void *weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases=nullptr - ) const override; - - /** Query the amount of working space required. - * @param[in] The largest number of threads which will be used to execute - * the kernel. - */ - size_t get_working_space_size(unsigned int n_threads=1) const override; - - /** Set the working space buffer. - */ - void set_working_space(void *buffer) override; - - /** Get the window of work to be performed by an instance of the operator. - */ - unsigned int get_window(void) const override; - - /** Perform a portion of the work associated with the operator. - * - * Will perform the window of work described by $[start, stop)$. - * - * @param[in] start Start of the window of work to perform. - * @param[in] stop End of the work to perform. - * @param[in] ID of the thread performing the work. - */ - void run( - unsigned int start, - unsigned int stop, - unsigned int threadid=0 - ) override; - - protected: - /** Get the value to use to pad the tensor. - */ - TIn _input_padding_value(void) const; - - /** Implementation of the parameter packing. - */ - void _pack_params( - void *buffer, - const void *weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases=nullptr - ) const; - - /** Process a tile-row of the tensors. 
- */ - void process_tile_row( - unsigned int threadid, - int n_channels, - const void* packed_params, - const InputType* inptr, - OutputType* outptr, - int row_pad_in_top, - int row_pad_in_left, - int row_pad_in_bottom, - int row_pad_out_bottom, - int n_tiles, - int n_input_cols, - int n_output_cols - ); - - /** Process a single tile of the tensor. - * - * This method will apply input/output padding (if required) and call the - * depthwise tile implementation. - */ - void process_tile( - unsigned int threadid, - int n_channels, - const void* packed_params, - const InputType* inptr, - OutputType* outptr, - int pad_in_top, - int pad_in_left, - int pad_in_bottom, - int pad_in_right, - int pad_out_bottom, - int pad_out_right - ); - - /** Perform depthwise convolution on a single tile. - */ - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const InputType* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - OutputType* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride - ); - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const InputType* inptrs[inner_tile_rows][inner_tile_cols], - OutputType* outptrs[output_tile_rows][output_tile_cols] - ); - - int n_channels(void) const; - - private: - // Member variables of instances of a convolution engine. 
- const InputType* _input; - OutputType* _output; - void* _packed_parameters; - void* _working_space; // Per-thread working space - const int _n_batches, _n_input_rows, _n_input_cols, _n_channels, - _n_output_rows, _n_output_cols, _n_tile_rows, _n_tile_cols; - const unsigned int _padding_top, _padding_left, _padding_bottom, _padding_right; - const nck::ActivationFunction _activation; - - // Stride information for a convolution instance - int _input_col_stride, _input_row_stride, _input_batch_stride; - int _output_col_stride, _output_row_stride, _output_batch_stride; - - // Methods for getting access to working space - size_t _get_input_working_space_size(void) const; - size_t _get_output_working_space_size(void) const; - - void *_get_input_working_space(unsigned int threadid) const; - void *_get_output_working_space(unsigned int threadid) const; -}; - - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename TIn, typename TBias, typename TOut -> -class DepthwiseConvolution : public DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - TIn, TBias, TOut, - DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - TIn, TBias, TOut - > -> -{ - using Base = DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - TIn, TBias, TOut, - DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - TIn, TBias, TOut - > >; - friend Base; - using InputType = typename Base::InputType; - using OutputType = typename Base::OutputType; - - public: - using Base::DepthwiseConvolutionBase; - - protected: - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const TIn* inptr, - 
unsigned int in_row_stride, - unsigned int in_col_stride, - TOut* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride - ); - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const InputType* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - OutputType* outptrs[Base::output_tile_rows][Base::output_tile_cols] - ); -}; - - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -class DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float, float, float -> : public DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float, float, float, - DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float, float, float - > -> -{ - using Base = DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float, float, float, - DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float, float, float - > >; - friend Base; - using InputType = typename Base::InputType; - using OutputType = typename Base::OutputType; - - public: - DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - protected: - 
template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const float* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - float* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride - ); - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const float* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float* outptrs[Base::output_tile_rows][Base::output_tile_cols] - ); -}; - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -class DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float16_t, float16_t, float16_t -> : public DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float16_t, float16_t, float16_t, - DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float16_t, float16_t, float16_t - > -> -{ - using Base = DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float16_t, float16_t, float16_t, - DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - float16_t, float16_t, float16_t - > >; - friend Base; - using InputType = typename Base::InputType; - using OutputType = typename Base::OutputType; - - public: - DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int 
n_channels, - int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - protected: - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const float16_t* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - float16_t* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride - ); - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const float16_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float16_t* outptrs[Base::output_tile_rows][Base::output_tile_cols] - ); -}; -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp deleted file mode 100644 index 864c6e24a0..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_1x1_fp32_fp32.cpp +++ /dev/null @@ -1,1168 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "impl_fp32_fp32.hpp" - -namespace depthwise -{ - -using namespace neon_convolution_kernels; -using Conv = DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float, float>; - -#ifdef __aarch64__ -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x26, %[inptr0], %[input_row_stride]\n" - "add x21, %[input_col_stride1], %[input_col_stride1]\n" - "add x23, %[outptr0], %[output_row_stride]\n" - "add x27, x26, %[input_row_stride]\n" - "add x22, x21, %[input_col_stride1]\n" - "and x24, %[n_channels], #3\n" - "add x28, x27, %[input_row_stride]\n" - "lsr x25, %[n_channels], #2\n" - "cbz x25, 4f\n" - "1:\n" - "ldr q15, [%[wbptr]]\n" - "subs x25, x25, #1\n" - "mov v3.16b, v15.16b\n" - "ldr q14, [%[wbptr], #16]\n" - "mov v1.16b, v15.16b\n" - "ldr q13, [%[wbptr], #32]\n" - "mov v2.16b, v15.16b\n" - "ldr q12, [%[wbptr], #48]\n" - "mov v0.16b, v15.16b\n" - "ldr q11, [%[wbptr], #64]\n" - "ldr q10, [%[wbptr], #80]\n" - "ldr q9, [%[wbptr], #96]\n" - "ldr q8, [%[wbptr], #112]\n" - "ldr q7, [%[wbptr], #128]\n" - "ldr q6, [%[wbptr], #144]\n" - "ldr q24, [%[inptr0]]\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "ldr q22, [x26]\n" - "fmla v1.4s, v22.4s, v14.4s\n" - "ldr q19, [%[inptr0], 
%[input_col_stride1]]\n" - "fmla v2.4s, v19.4s, v14.4s\n" - "ldr q18, [x27]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "ldr q21, [x26, %[input_col_stride1]]\n" - "fmla v1.4s, v18.4s, v11.4s\n" - "ldr q17, [%[inptr0], x21]\n" - "ldr q20, [x28]\n" - "ldr q5, [x27, %[input_col_stride1]]\n" - "fmla v3.4s, v19.4s, v13.4s\n" - "fmla v3.4s, v18.4s, v8.4s\n" - "beq 3f\n" - "2:\n" - "fmla v3.4s, v21.4s, v10.4s\n" - "ldr q19, [x26, x21]\n" - "fmla v1.4s, v21.4s, v13.4s\n" - "ldr q23, [%[inptr0], x22]\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "ldr q22, [x28, %[input_col_stride1]]\n" - "fmla v0.4s, v21.4s, v14.4s\n" - "ldr q21, [x27, x21]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr q18, [x26, x22]\n" - "fmla v2.4s, v17.4s, v13.4s\n" - "ldr q16, [x28, x21]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "ldr q20, [x27, x22]\n" - "fmla v3.4s, v5.4s, v7.4s\n" - "ldr q4, [x28, x22]\n" - "fmla v2.4s, v5.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v1.4s, v5.4s, v10.4s\n" - "ldr q15, [%[wbptr]]\n" - "fmla v0.4s, v5.4s, v11.4s\n" - "ldr q14, [%[wbptr], #16]\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v1.4s, v19.4s, v12.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v2.4s, v19.4s, v10.4s\n" - "ldr q11, [%[wbptr], #64]\n" - "fmla v0.4s, v19.4s, v13.4s\n" - "ldr q24, [%[inptr0]]\n" - "fmla v1.4s, v22.4s, v7.4s\n" - "ldr q19, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "ldr q17, [%[inptr0], x21]\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "ldr q13, [%[wbptr], #32]\n" - "fmla v3.4s, v21.4s, v6.4s\n" - "add x26, x26, #16\n" - "fmla v1.4s, v21.4s, v9.4s\n" - "ldr q22, [x26]\n" - "fmla v2.4s, v21.4s, v7.4s\n" - "ldr q8, [%[wbptr], #112]\n" - "str q3, [%[outptr0]]\n" - "fmla v0.4s, v21.4s, v10.4s\n" - "fmla v1.4s, v16.4s, v6.4s\n" - "ldr q21, [x26, %[input_col_stride1]]\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "add x27, x27, #16\n" - "fmla v0.4s, v18.4s, v12.4s\n" - "ldr q10, [%[wbptr], #80]\n" - "str q1, [x23]\n" - "mov v3.16b, v15.16b\n" - 
"fmla v2.4s, v20.4s, v6.4s\n" - "ldr q18, [x27]\n" - "fmla v0.4s, v16.4s, v7.4s\n" - "ldr q12, [%[wbptr], #48]\n" - "mov v1.16b, v15.16b\n" - "ldr q5, [x27, %[input_col_stride1]]\n" - "str q2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "fmla v0.4s, v20.4s, v9.4s\n" - "ldr q7, [%[wbptr], #128]\n" - "mov v2.16b, v15.16b\n" - "add x28, x28, #16\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "ldr q20, [x28]\n" - "fmla v0.4s, v4.4s, v6.4s\n" - "ldr q9, [%[wbptr], #96]\n" - "fmla v1.4s, v22.4s, v14.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v3.4s, v19.4s, v13.4s\n" - "subs x25, x25, #1\n" - "str q0, [x23, %[output_col_stride1]]\n" - "fmla v2.4s, v19.4s, v14.4s\n" - "ldr q6, [%[wbptr], #144]\n" - "add x23, x23, #16\n" - "fmla v3.4s, v18.4s, v8.4s\n" - "fmla v1.4s, v18.4s, v11.4s\n" - "mov v0.16b, v15.16b\n" - "bne 2b\n" - "3:\n" - "fmla v3.4s, v21.4s, v10.4s\n" - "ldr q19, [x26, x21]\n" - "fmla v1.4s, v21.4s, v13.4s\n" - "ldr q23, [%[inptr0], x22]\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "ldr q22, [x28, %[input_col_stride1]]\n" - "fmla v0.4s, v21.4s, v14.4s\n" - "ldr q21, [x27, x21]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr q18, [x26, x22]\n" - "fmla v2.4s, v17.4s, v13.4s\n" - "ldr q16, [x28, x21]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "ldr q20, [x27, x22]\n" - "fmla v3.4s, v5.4s, v7.4s\n" - "ldr q4, [x28, x22]\n" - "fmla v2.4s, v5.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v1.4s, v5.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v5.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "add x26, x26, #16\n" - "fmla v1.4s, v19.4s, v12.4s\n" - "add x27, x27, #16\n" - "fmla v2.4s, v19.4s, v10.4s\n" - "add x28, x28, #16\n" - "fmla v0.4s, v19.4s, v13.4s\n" - "fmla v3.4s, v21.4s, v6.4s\n" - "fmla v1.4s, v22.4s, v7.4s\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "str q3, [%[outptr0]]\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "fmla v1.4s, v21.4s, v9.4s\n" - "fmla v2.4s, v21.4s, v7.4s\n" - "fmla v0.4s, 
v21.4s, v10.4s\n" - "fmla v1.4s, v16.4s, v6.4s\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "fmla v0.4s, v18.4s, v12.4s\n" - "str q1, [x23]\n" - "fmla v2.4s, v20.4s, v6.4s\n" - "fmla v0.4s, v16.4s, v7.4s\n" - "str q2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v20.4s, v9.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v0.4s, v4.4s, v6.4s\n" - "str q0, [x23, %[output_col_stride1]]\n" - "add x23, x23, #16\n" - "4:\n" - "cbz x24, 7f\n" - "ldr s15, [%[wbptr]]\n" - "mov v3.16b, v15.16b\n" - "ldr s14, [%[wbptr], #4]\n" - "mov v1.16b, v15.16b\n" - "ldr s13, [%[wbptr], #8]\n" - "mov v2.16b, v15.16b\n" - "ldr s12, [%[wbptr], #12]\n" - "mov v0.16b, v15.16b\n" - "ldr s11, [%[wbptr], #16]\n" - "ldr s10, [%[wbptr], #20]\n" - "subs x24, x24, #1\n" - "ldr s9, [%[wbptr], #24]\n" - "ldr s8, [%[wbptr], #28]\n" - "ldr s7, [%[wbptr], #32]\n" - "ldr s6, [%[wbptr], #36]\n" - "ldr s24, [%[inptr0]]\n" - "ldr s22, [x26]\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "ldr s19, [%[inptr0], %[input_col_stride1]]\n" - "fmla v1.4s, v22.4s, v14.4s\n" - "ldr s18, [x27]\n" - "fmla v2.4s, v19.4s, v14.4s\n" - "ldr s21, [x26, %[input_col_stride1]]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "ldr s17, [%[inptr0], x21]\n" - "fmla v1.4s, v18.4s, v11.4s\n" - "ldr s20, [x28]\n" - "ldr s5, [x27, %[input_col_stride1]]\n" - "fmla v3.4s, v19.4s, v13.4s\n" - "fmla v3.4s, v18.4s, v8.4s\n" - "beq 6f\n" - "5:\n" - "fmla v3.4s, v21.4s, v10.4s\n" - "ldr s19, [x26, x21]\n" - "fmla v1.4s, v21.4s, v13.4s\n" - "ldr s23, [%[inptr0], x22]\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "ldr s22, [x28, %[input_col_stride1]]\n" - "fmla v0.4s, v21.4s, v14.4s\n" - "ldr s21, [x27, x21]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr s18, [x26, x22]\n" - "fmla v2.4s, v17.4s, v13.4s\n" - "ldr s16, [x28, x21]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "ldr s20, [x27, x22]\n" - "fmla v3.4s, v5.4s, v7.4s\n" - "ldr s4, [x28, x22]\n" - "fmla v2.4s, v5.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v1.4s, v5.4s, v10.4s\n" - "ldr s15, [%[wbptr]]\n" - 
"fmla v0.4s, v5.4s, v11.4s\n" - "ldr s14, [%[wbptr], #4]\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v1.4s, v19.4s, v12.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v2.4s, v19.4s, v10.4s\n" - "ldr s11, [%[wbptr], #16]\n" - "fmla v0.4s, v19.4s, v13.4s\n" - "ldr s24, [%[inptr0]]\n" - "fmla v1.4s, v22.4s, v7.4s\n" - "ldr s19, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "ldr s17, [%[inptr0], x21]\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "ldr s13, [%[wbptr], #8]\n" - "fmla v3.4s, v21.4s, v6.4s\n" - "add x26, x26, #4\n" - "fmla v1.4s, v21.4s, v9.4s\n" - "ldr s22, [x26]\n" - "fmla v2.4s, v21.4s, v7.4s\n" - "ldr s8, [%[wbptr], #28]\n" - "str s3, [%[outptr0]]\n" - "fmla v0.4s, v21.4s, v10.4s\n" - "fmla v1.4s, v16.4s, v6.4s\n" - "ldr s21, [x26, %[input_col_stride1]]\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "add x27, x27, #4\n" - "fmla v0.4s, v18.4s, v12.4s\n" - "ldr s10, [%[wbptr], #20]\n" - "str s1, [x23]\n" - "mov v3.16b, v15.16b\n" - "fmla v2.4s, v20.4s, v6.4s\n" - "ldr s18, [x27]\n" - "fmla v0.4s, v16.4s, v7.4s\n" - "ldr s12, [%[wbptr], #12]\n" - "mov v1.16b, v15.16b\n" - "ldr s5, [x27, %[input_col_stride1]]\n" - "str s2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "fmla v0.4s, v20.4s, v9.4s\n" - "ldr s7, [%[wbptr], #32]\n" - "mov v2.16b, v15.16b\n" - "add x28, x28, #4\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "ldr s20, [x28]\n" - "fmla v0.4s, v4.4s, v6.4s\n" - "ldr s9, [%[wbptr], #24]\n" - "fmla v1.4s, v22.4s, v14.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v3.4s, v19.4s, v13.4s\n" - "subs x24, x24, #1\n" - "str s0, [x23, %[output_col_stride1]]\n" - "fmla v2.4s, v19.4s, v14.4s\n" - "ldr s6, [%[wbptr], #36]\n" - "add x23, x23, #4\n" - "fmla v3.4s, v18.4s, v8.4s\n" - "fmla v1.4s, v18.4s, v11.4s\n" - "mov v0.16b, v15.16b\n" - "bne 5b\n" - "6:\n" - "fmla v3.4s, v21.4s, v10.4s\n" - "ldr s19, [x26, x21]\n" - "fmla v1.4s, v21.4s, v13.4s\n" - "ldr s23, [%[inptr0], x22]\n" - "fmla 
v2.4s, v21.4s, v11.4s\n" - "ldr s22, [x28, %[input_col_stride1]]\n" - "fmla v0.4s, v21.4s, v14.4s\n" - "ldr s21, [x27, x21]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr s18, [x26, x22]\n" - "fmla v2.4s, v17.4s, v13.4s\n" - "ldr s16, [x28, x21]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "ldr s20, [x27, x22]\n" - "fmla v3.4s, v5.4s, v7.4s\n" - "ldr s4, [x28, x22]\n" - "fmla v2.4s, v5.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v1.4s, v5.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v5.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "add x26, x26, #4\n" - "fmla v1.4s, v19.4s, v12.4s\n" - "add x27, x27, #4\n" - "fmla v2.4s, v19.4s, v10.4s\n" - "add x28, x28, #4\n" - "fmla v0.4s, v19.4s, v13.4s\n" - "fmla v3.4s, v21.4s, v6.4s\n" - "fmla v1.4s, v22.4s, v7.4s\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "str s3, [%[outptr0]]\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "fmla v1.4s, v21.4s, v9.4s\n" - "fmla v2.4s, v21.4s, v7.4s\n" - "fmla v0.4s, v21.4s, v10.4s\n" - "fmla v1.4s, v16.4s, v6.4s\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "fmla v0.4s, v18.4s, v12.4s\n" - "str s1, [x23]\n" - "fmla v2.4s, v20.4s, v6.4s\n" - "fmla v0.4s, v16.4s, v7.4s\n" - "str s2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v20.4s, v9.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v0.4s, v4.4s, v6.4s\n" - "str s0, [x23, %[output_col_stride1]]\n" - "add x23, x23, #4\n" - "7:\n" - : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr) - : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x21", "x22", "x23", "x24", "x25", 
"x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x21, %[inptr0], %[input_row_stride]\n" - "add x24, %[input_col_stride1], %[input_col_stride1]\n" - "add x22, %[outptr0], %[output_row_stride]\n" - "add x23, x21, %[input_row_stride]\n" - "add x27, x24, %[input_col_stride1]\n" - "and x25, %[n_channels], #3\n" - "add x28, x23, %[input_row_stride]\n" - "lsr x26, %[n_channels], #2\n" - "cbz x26, 4f\n" - "1:\n" - "ldr q11, [%[wbptr]]\n" - "subs x26, x26, #1\n" - "mov v17.16b, v11.16b\n" - "ldr q13, [%[wbptr], #16]\n" - "mov v15.16b, v11.16b\n" - "ldr q4, [%[wbptr], #32]\n" - "mov v16.16b, v11.16b\n" - "ldr q2, [%[wbptr], #48]\n" - "mov v14.16b, v11.16b\n" - "ldr q5, [%[wbptr], #64]\n" - "ldr q10, [%[wbptr], #80]\n" - "ldr q1, [%[wbptr], #96]\n" - "ldr q12, [%[wbptr], #112]\n" - "ldr q0, [%[wbptr], #128]\n" - "ldr q3, [%[wbptr], #144]\n" - "ldr q6, [%[inptr0]]\n" - "fmla v17.4s, v6.4s, v13.4s\n" - "ldr q27, [x21]\n" - "fmla v15.4s, v27.4s, v13.4s\n" - "ldr q23, [%[inptr0], %[input_col_stride1]]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "ldr q24, [x23]\n" - "fmla v17.4s, v27.4s, v5.4s\n" - "ldr q22, [x21, %[input_col_stride1]]\n" - "ldr q9, [%[inptr0], x24]\n" - "ldr q8, [x28]\n" - "ldr q20, [x23, %[input_col_stride1]]\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "beq 3f\n" - "2:\n" - "fmla v17.4s, v24.4s, v12.4s\n" - "ldr q26, [x21, x24]\n" - "fmla v15.4s, v24.4s, v5.4s\n" - "ldr q27, [%[inptr0], x27]\n" - "fmla v16.4s, v22.4s, v5.4s\n" - "ldr q25, [x28, %[input_col_stride1]]\n" - "fmla v17.4s, v22.4s, v10.4s\n" - "ldr q24, [x23, x24]\n" - "fmla v15.4s, v22.4s, v4.4s\n" - "ldr q21, [x21, x27]\n" - "fmla v14.4s, v22.4s, v13.4s\n" - "ldr q7, 
[x28, x24]\n" - "fmla v17.4s, v9.4s, v2.4s\n" - "ldr q19, [x23, x27]\n" - "fmla v16.4s, v9.4s, v4.4s\n" - "ldr q18, [x28, x27]\n" - "fmla v15.4s, v8.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v17.4s, v20.4s, v0.4s\n" - "ldr q11, [%[wbptr]]\n" - "fmla v16.4s, v20.4s, v12.4s\n" - "ldr q13, [%[wbptr], #16]\n" - "fmla v15.4s, v20.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v14.4s, v20.4s, v5.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v17.4s, v26.4s, v1.4s\n" - "ldr q6, [%[inptr0]]\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "ldr q23, [%[inptr0], %[input_col_stride1]]\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "ldr q5, [%[wbptr], #64]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "ldr q9, [%[inptr0], x24]\n" - "fmla v15.4s, v25.4s, v0.4s\n" - "add x21, x21, #16\n" - "fmla v16.4s, v27.4s, v2.4s\n" - "ldr q27, [x21]\n" - "fmla v14.4s, v25.4s, v12.4s\n" - "ldr q4, [%[wbptr], #32]\n" - "fmla v17.4s, v24.4s, v3.4s\n" - "ldr q22, [x21, %[input_col_stride1]]\n" - "fmla v15.4s, v24.4s, v1.4s\n" - "add x23, x23, #16\n" - "fmla v16.4s, v24.4s, v0.4s\n" - "ldr q12, [%[wbptr], #112]\n" - "fmla v14.4s, v24.4s, v10.4s\n" - "ldr q24, [x23]\n" - "fmla v15.4s, v7.4s, v3.4s\n" - "ldr q20, [x23, %[input_col_stride1]]\n" - "fmla v16.4s, v21.4s, v1.4s\n" - "add x28, x28, #16\n" - "fmla v14.4s, v21.4s, v2.4s\n" - "ldr q10, [%[wbptr], #80]\n" - "movi v26.16b, #0\n" - "ldr q8, [x28]\n" - "fmla v16.4s, v19.4s, v3.4s\n" - "subs x26, x26, #1\n" - "fmla v14.4s, v7.4s, v0.4s\n" - "ldr q2, [%[wbptr], #48]\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "str q17, [%[outptr0]]\n" - "str q16, [%[outptr0], %[output_col_stride1]]\n" - "fmla v14.4s, v19.4s, v1.4s\n" - "str q15, [x22]\n" - "mov v17.16b, v11.16b\n" - "mov v15.16b, v11.16b\n" - "ldr q0, [%[wbptr], #128]\n" - "fmla v14.4s, v18.4s, v3.4s\n" - "ldr q1, [%[wbptr], #96]\n" - "mov v16.16b, v11.16b\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v17.4s, v6.4s, v13.4s\n" 
- "fmla v15.4s, v27.4s, v13.4s\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "ldr q3, [%[wbptr], #144]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "str q14, [x22, %[output_col_stride1]]\n" - "mov v14.16b, v11.16b\n" - "add x22, x22, #16\n" - "fmla v17.4s, v27.4s, v5.4s\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "bne 2b\n" - "3:\n" - "fmla v17.4s, v24.4s, v12.4s\n" - "ldr q26, [x21, x24]\n" - "fmla v15.4s, v24.4s, v5.4s\n" - "ldr q27, [%[inptr0], x27]\n" - "fmla v16.4s, v22.4s, v5.4s\n" - "ldr q25, [x28, %[input_col_stride1]]\n" - "fmla v17.4s, v22.4s, v10.4s\n" - "ldr q24, [x23, x24]\n" - "fmla v15.4s, v22.4s, v4.4s\n" - "ldr q21, [x21, x27]\n" - "fmla v14.4s, v22.4s, v13.4s\n" - "ldr q7, [x28, x24]\n" - "fmla v17.4s, v9.4s, v2.4s\n" - "ldr q19, [x23, x27]\n" - "fmla v16.4s, v9.4s, v4.4s\n" - "ldr q18, [x28, x27]\n" - "fmla v15.4s, v8.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v17.4s, v20.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v20.4s, v12.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v15.4s, v20.4s, v10.4s\n" - "add x21, x21, #16\n" - "fmla v14.4s, v20.4s, v5.4s\n" - "add x23, x23, #16\n" - "fmla v17.4s, v26.4s, v1.4s\n" - "add x28, x28, #16\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "movi v26.16b, #0\n" - "fmla v17.4s, v24.4s, v3.4s\n" - "fmla v16.4s, v27.4s, v2.4s\n" - "fmla v15.4s, v25.4s, v0.4s\n" - "fmla v14.4s, v25.4s, v12.4s\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "fmla v16.4s, v24.4s, v0.4s\n" - "str q17, [%[outptr0]]\n" - "fmla v15.4s, v24.4s, v1.4s\n" - "fmla v14.4s, v24.4s, v10.4s\n" - "fmla v16.4s, v21.4s, v1.4s\n" - "fmla v15.4s, v7.4s, v3.4s\n" - "fmla v14.4s, v21.4s, v2.4s\n" - "fmla v16.4s, v19.4s, v3.4s\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "fmla v14.4s, v7.4s, v0.4s\n" - "str q15, [x22]\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "fmla v14.4s, v19.4s, v1.4s\n" - "str q16, [%[outptr0], %[output_col_stride1]]\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v14.4s, 
v18.4s, v3.4s\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "str q14, [x22, %[output_col_stride1]]\n" - "add x22, x22, #16\n" - "4:\n" - "cbz x25, 7f\n" - "ldr s11, [%[wbptr]]\n" - "mov v17.16b, v11.16b\n" - "ldr s13, [%[wbptr], #4]\n" - "mov v15.16b, v11.16b\n" - "ldr s4, [%[wbptr], #8]\n" - "mov v16.16b, v11.16b\n" - "ldr s2, [%[wbptr], #12]\n" - "mov v14.16b, v11.16b\n" - "ldr s5, [%[wbptr], #16]\n" - "ldr s10, [%[wbptr], #20]\n" - "subs x25, x25, #1\n" - "ldr s1, [%[wbptr], #24]\n" - "ldr s12, [%[wbptr], #28]\n" - "ldr s0, [%[wbptr], #32]\n" - "ldr s3, [%[wbptr], #36]\n" - "ldr s6, [%[inptr0]]\n" - "ldr s27, [x21]\n" - "fmla v17.4s, v6.4s, v13.4s\n" - "ldr s23, [%[inptr0], %[input_col_stride1]]\n" - "fmla v15.4s, v27.4s, v13.4s\n" - "ldr s24, [x23]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "ldr s22, [x21, %[input_col_stride1]]\n" - "fmla v17.4s, v27.4s, v5.4s\n" - "ldr s9, [%[inptr0], x24]\n" - "ldr s8, [x28]\n" - "ldr s20, [x23, %[input_col_stride1]]\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "beq 6f\n" - "5:\n" - "fmla v17.4s, v24.4s, v12.4s\n" - "ldr s26, [x21, x24]\n" - "fmla v15.4s, v24.4s, v5.4s\n" - "ldr s27, [%[inptr0], x27]\n" - "fmla v16.4s, v22.4s, v5.4s\n" - "ldr s25, [x28, %[input_col_stride1]]\n" - "fmla v17.4s, v22.4s, v10.4s\n" - "ldr s24, [x23, x24]\n" - "fmla v15.4s, v22.4s, v4.4s\n" - "ldr s21, [x21, x27]\n" - "fmla v14.4s, v22.4s, v13.4s\n" - "ldr s7, [x28, x24]\n" - "fmla v17.4s, v9.4s, v2.4s\n" - "ldr s19, [x23, x27]\n" - "fmla v16.4s, v9.4s, v4.4s\n" - "ldr s18, [x28, x27]\n" - "fmla v15.4s, v8.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v17.4s, v20.4s, v0.4s\n" - "ldr s11, [%[wbptr]]\n" - "fmla v16.4s, v20.4s, v12.4s\n" - "ldr s13, [%[wbptr], #4]\n" - "fmla v15.4s, v20.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v14.4s, v20.4s, v5.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v17.4s, v26.4s, v1.4s\n" - "ldr s6, [%[inptr0]]\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "ldr s23, [%[inptr0], %[input_col_stride1]]\n" - "fmla v16.4s, 
v26.4s, v10.4s\n" - "ldr s5, [%[wbptr], #16]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "ldr s9, [%[inptr0], x24]\n" - "fmla v15.4s, v25.4s, v0.4s\n" - "add x21, x21, #4\n" - "fmla v16.4s, v27.4s, v2.4s\n" - "ldr s27, [x21]\n" - "fmla v14.4s, v25.4s, v12.4s\n" - "ldr s4, [%[wbptr], #8]\n" - "fmla v17.4s, v24.4s, v3.4s\n" - "ldr s22, [x21, %[input_col_stride1]]\n" - "fmla v15.4s, v24.4s, v1.4s\n" - "add x23, x23, #4\n" - "fmla v16.4s, v24.4s, v0.4s\n" - "ldr s12, [%[wbptr], #28]\n" - "fmla v14.4s, v24.4s, v10.4s\n" - "ldr s24, [x23]\n" - "fmla v15.4s, v7.4s, v3.4s\n" - "ldr s20, [x23, %[input_col_stride1]]\n" - "fmla v16.4s, v21.4s, v1.4s\n" - "add x28, x28, #4\n" - "fmla v14.4s, v21.4s, v2.4s\n" - "ldr s10, [%[wbptr], #20]\n" - "movi v26.16b, #0\n" - "ldr s8, [x28]\n" - "fmla v16.4s, v19.4s, v3.4s\n" - "subs x25, x25, #1\n" - "fmla v14.4s, v7.4s, v0.4s\n" - "ldr s2, [%[wbptr], #12]\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "str s17, [%[outptr0]]\n" - "str s16, [%[outptr0], %[output_col_stride1]]\n" - "fmla v14.4s, v19.4s, v1.4s\n" - "str s15, [x22]\n" - "mov v17.16b, v11.16b\n" - "mov v15.16b, v11.16b\n" - "ldr s0, [%[wbptr], #32]\n" - "fmla v14.4s, v18.4s, v3.4s\n" - "ldr s1, [%[wbptr], #24]\n" - "mov v16.16b, v11.16b\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v17.4s, v6.4s, v13.4s\n" - "fmla v15.4s, v27.4s, v13.4s\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "ldr s3, [%[wbptr], #36]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "str s14, [x22, %[output_col_stride1]]\n" - "mov v14.16b, v11.16b\n" - "add x22, x22, #4\n" - "fmla v17.4s, v27.4s, v5.4s\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "bne 5b\n" - "6:\n" - "fmla v17.4s, v24.4s, v12.4s\n" - "ldr s26, [x21, x24]\n" - "fmla v15.4s, v24.4s, v5.4s\n" - "ldr s27, [%[inptr0], x27]\n" - "fmla v16.4s, v22.4s, v5.4s\n" - "ldr s25, [x28, %[input_col_stride1]]\n" - "fmla v17.4s, v22.4s, v10.4s\n" - "ldr s24, [x23, x24]\n" - "fmla v15.4s, v22.4s, v4.4s\n" - "ldr s21, [x21, 
x27]\n" - "fmla v14.4s, v22.4s, v13.4s\n" - "ldr s7, [x28, x24]\n" - "fmla v17.4s, v9.4s, v2.4s\n" - "ldr s19, [x23, x27]\n" - "fmla v16.4s, v9.4s, v4.4s\n" - "ldr s18, [x28, x27]\n" - "fmla v15.4s, v8.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v17.4s, v20.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v20.4s, v12.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v15.4s, v20.4s, v10.4s\n" - "add x21, x21, #4\n" - "fmla v14.4s, v20.4s, v5.4s\n" - "add x23, x23, #4\n" - "fmla v17.4s, v26.4s, v1.4s\n" - "add x28, x28, #4\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "movi v26.16b, #0\n" - "fmla v17.4s, v24.4s, v3.4s\n" - "fmla v16.4s, v27.4s, v2.4s\n" - "fmla v15.4s, v25.4s, v0.4s\n" - "fmla v14.4s, v25.4s, v12.4s\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "fmla v16.4s, v24.4s, v0.4s\n" - "str s17, [%[outptr0]]\n" - "fmla v15.4s, v24.4s, v1.4s\n" - "fmla v14.4s, v24.4s, v10.4s\n" - "fmla v16.4s, v21.4s, v1.4s\n" - "fmla v15.4s, v7.4s, v3.4s\n" - "fmla v14.4s, v21.4s, v2.4s\n" - "fmla v16.4s, v19.4s, v3.4s\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "fmla v14.4s, v7.4s, v0.4s\n" - "str s15, [x22]\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "fmla v14.4s, v19.4s, v1.4s\n" - "str s16, [%[outptr0], %[output_col_stride1]]\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v14.4s, v18.4s, v3.4s\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "str s14, [x22, %[output_col_stride1]]\n" - "add x22, x22, #4\n" - "7:\n" - : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr) - : [n_channels] "r" ((long) n_channels), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", 
"v26", "v27", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU6>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x21, %[inptr0], %[input_row_stride]\n" - "add x23, %[input_col_stride1], %[input_col_stride1]\n" - "add x24, %[outptr0], %[output_row_stride]\n" - "add x27, x21, %[input_row_stride]\n" - "add x22, x23, %[input_col_stride1]\n" - "and x25, %[n_channels], #3\n" - "add x28, x27, %[input_row_stride]\n" - "lsr x26, %[n_channels], #2\n" - "cbz x26, 4f\n" - "1:\n" - "ldr q19, [%[wbptr]]\n" - "subs x26, x26, #1\n" - "mov v3.16b, v19.16b\n" - "ldr q12, [%[wbptr], #16]\n" - "mov v1.16b, v19.16b\n" - "ldr q11, [%[wbptr], #32]\n" - "mov v2.16b, v19.16b\n" - "ldr q10, [%[wbptr], #48]\n" - "mov v0.16b, v19.16b\n" - "ldr q13, [%[wbptr], #64]\n" - "ldr q23, [%[wbptr], #80]\n" - "ldr q15, [%[wbptr], #96]\n" - "ldr q20, [%[wbptr], #112]\n" - "ldr q21, [%[wbptr], #128]\n" - "ldr q14, [%[wbptr], #144]\n" - "ldr q16, [%[inptr0]]\n" - "fmla v3.4s, v16.4s, v12.4s\n" - "ldr q28, [x21]\n" - "fmla v1.4s, v28.4s, v12.4s\n" - "ldr q22, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "ldr q24, [x27]\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "ldr q8, [x21, %[input_col_stride1]]\n" - "ldr q9, [%[inptr0], x23]\n" - "ldr q18, [x28]\n" - "ldr q6, [x27, %[input_col_stride1]]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "beq 3f\n" - "2:\n" - "fmla v3.4s, v24.4s, v20.4s\n" - "ldr q25, [x21, x23]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "ldr q28, [%[inptr0], x22]\n" - "fmla v2.4s, v8.4s, v13.4s\n" - "ldr q24, [x28, %[input_col_stride1]]\n" - "fmla v3.4s, v8.4s, v23.4s\n" - "ldr q27, [x27, x23]\n" - "fmla v1.4s, 
v8.4s, v11.4s\n" - "ldr q7, [x21, x22]\n" - "fmla v0.4s, v8.4s, v12.4s\n" - "ldr q17, [x28, x23]\n" - "fmla v3.4s, v9.4s, v10.4s\n" - "ldr q5, [x27, x22]\n" - "fmla v2.4s, v9.4s, v11.4s\n" - "ldr q4, [x28, x22]\n" - "fmla v1.4s, v18.4s, v20.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v3.4s, v6.4s, v21.4s\n" - "ldr q19, [%[wbptr]]\n" - "fmla v2.4s, v6.4s, v20.4s\n" - "ldr q12, [%[wbptr], #16]\n" - "fmla v1.4s, v6.4s, v23.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v6.4s, v13.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v3.4s, v25.4s, v15.4s\n" - "ldr q16, [%[inptr0]]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "ldr q22, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v25.4s, v23.4s\n" - "ldr q13, [%[wbptr], #64]\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "ldr q9, [%[inptr0], x23]\n" - "fmla v1.4s, v24.4s, v21.4s\n" - "add x21, x21, #16\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "ldr q28, [x21]\n" - "fmla v0.4s, v24.4s, v20.4s\n" - "ldr q11, [%[wbptr], #32]\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "ldr q8, [x21, %[input_col_stride1]]\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "add x27, x27, #16\n" - "fmla v2.4s, v27.4s, v21.4s\n" - "ldr q20, [%[wbptr], #112]\n" - "fmla v0.4s, v27.4s, v23.4s\n" - "ldr q24, [x27]\n" - "fmla v1.4s, v17.4s, v14.4s\n" - "ldr q6, [x27, %[input_col_stride1]]\n" - "fmla v2.4s, v7.4s, v15.4s\n" - "add x28, x28, #16\n" - "fmla v0.4s, v7.4s, v10.4s\n" - "ldr q23, [%[wbptr], #80]\n" - "movi v25.16b, #0\n" - "ldr q18, [x28]\n" - "fmla v2.4s, v5.4s, v14.4s\n" - "subs x26, x26, #1\n" - "fmla v0.4s, v17.4s, v21.4s\n" - "ldr q10, [%[wbptr], #48]\n" - "fmov v26.4s, #6.0\n" - "fmax v3.4s, v3.4s, v25.4s\n" - "fmax v2.4s, v2.4s, v25.4s\n" - "fmax v1.4s, v1.4s, v25.4s\n" - "fmla v0.4s, v5.4s, v15.4s\n" - "ldr q21, [%[wbptr], #128]\n" - "fmin v3.4s, v3.4s, v26.4s\n" - "fmin v2.4s, v2.4s, v26.4s\n" - "fmin v1.4s, v1.4s, v26.4s\n" - "str q3, [%[outptr0]]\n" - "str q2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v4.4s, v14.4s\n" - "str q1, 
[x24]\n" - "mov v3.16b, v19.16b\n" - "mov v1.16b, v19.16b\n" - "ldr q15, [%[wbptr], #96]\n" - "fmax v0.4s, v0.4s, v25.4s\n" - "ldr q14, [%[wbptr], #144]\n" - "mov v2.16b, v19.16b\n" - "add %[outptr0], %[outptr0], #16\n" - "fmin v0.4s, v0.4s, v26.4s\n" - "fmla v3.4s, v16.4s, v12.4s\n" - "fmla v1.4s, v28.4s, v12.4s\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "str q0, [x24, %[output_col_stride1]]\n" - "mov v0.16b, v19.16b\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "add x24, x24, #16\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "bne 2b\n" - "3:\n" - "fmla v3.4s, v24.4s, v20.4s\n" - "ldr q25, [x21, x23]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "ldr q28, [%[inptr0], x22]\n" - "fmla v2.4s, v8.4s, v13.4s\n" - "ldr q24, [x28, %[input_col_stride1]]\n" - "fmla v3.4s, v8.4s, v23.4s\n" - "ldr q27, [x27, x23]\n" - "fmla v1.4s, v8.4s, v11.4s\n" - "ldr q7, [x21, x22]\n" - "fmla v0.4s, v8.4s, v12.4s\n" - "ldr q17, [x28, x23]\n" - "fmla v3.4s, v9.4s, v10.4s\n" - "ldr q5, [x27, x22]\n" - "fmla v2.4s, v9.4s, v11.4s\n" - "ldr q4, [x28, x22]\n" - "fmla v1.4s, v18.4s, v20.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v3.4s, v6.4s, v21.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v2.4s, v6.4s, v20.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v1.4s, v6.4s, v23.4s\n" - "add x21, x21, #16\n" - "fmla v0.4s, v6.4s, v13.4s\n" - "add x27, x27, #16\n" - "fmla v3.4s, v25.4s, v15.4s\n" - "add x28, x28, #16\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "fmla v2.4s, v25.4s, v23.4s\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "movi v25.16b, #0\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "fmov v26.4s, #6.0\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "fmla v1.4s, v24.4s, v21.4s\n" - "fmla v0.4s, v24.4s, v20.4s\n" - "fmax v3.4s, v3.4s, v25.4s\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "fmla v2.4s, v27.4s, v21.4s\n" - "fmla v0.4s, v27.4s, v23.4s\n" - "fmin v3.4s, v3.4s, v26.4s\n" - "str q3, [%[outptr0]]\n" - "fmla v2.4s, v7.4s, v15.4s\n" - "fmla v0.4s, v7.4s, v10.4s\n" - "fmla v1.4s, v17.4s, v14.4s\n" - "fmla v2.4s, v5.4s, v14.4s\n" - "fmla 
v0.4s, v17.4s, v21.4s\n" - "fmax v1.4s, v1.4s, v25.4s\n" - "fmax v2.4s, v2.4s, v25.4s\n" - "fmla v0.4s, v5.4s, v15.4s\n" - "fmin v1.4s, v1.4s, v26.4s\n" - "fmin v2.4s, v2.4s, v26.4s\n" - "str q1, [x24]\n" - "str q2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v4.4s, v14.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmax v0.4s, v0.4s, v25.4s\n" - "fmin v0.4s, v0.4s, v26.4s\n" - "str q0, [x24, %[output_col_stride1]]\n" - "add x24, x24, #16\n" - "4:\n" - "cbz x25, 7f\n" - "ldr s19, [%[wbptr]]\n" - "mov v3.16b, v19.16b\n" - "ldr s12, [%[wbptr], #4]\n" - "mov v1.16b, v19.16b\n" - "ldr s11, [%[wbptr], #8]\n" - "mov v2.16b, v19.16b\n" - "ldr s10, [%[wbptr], #12]\n" - "mov v0.16b, v19.16b\n" - "ldr s13, [%[wbptr], #16]\n" - "ldr s23, [%[wbptr], #20]\n" - "subs x25, x25, #1\n" - "ldr s15, [%[wbptr], #24]\n" - "ldr s20, [%[wbptr], #28]\n" - "ldr s21, [%[wbptr], #32]\n" - "ldr s14, [%[wbptr], #36]\n" - "ldr s16, [%[inptr0]]\n" - "ldr s28, [x21]\n" - "fmla v3.4s, v16.4s, v12.4s\n" - "ldr s22, [%[inptr0], %[input_col_stride1]]\n" - "fmla v1.4s, v28.4s, v12.4s\n" - "ldr s24, [x27]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "ldr s8, [x21, %[input_col_stride1]]\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "ldr s9, [%[inptr0], x23]\n" - "ldr s18, [x28]\n" - "ldr s6, [x27, %[input_col_stride1]]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "beq 6f\n" - "5:\n" - "fmla v3.4s, v24.4s, v20.4s\n" - "ldr s25, [x21, x23]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "ldr s28, [%[inptr0], x22]\n" - "fmla v2.4s, v8.4s, v13.4s\n" - "ldr s24, [x28, %[input_col_stride1]]\n" - "fmla v3.4s, v8.4s, v23.4s\n" - "ldr s27, [x27, x23]\n" - "fmla v1.4s, v8.4s, v11.4s\n" - "ldr s7, [x21, x22]\n" - "fmla v0.4s, v8.4s, v12.4s\n" - "ldr s17, [x28, x23]\n" - "fmla v3.4s, v9.4s, v10.4s\n" - "ldr s5, [x27, x22]\n" - "fmla v2.4s, v9.4s, v11.4s\n" - "ldr s4, [x28, x22]\n" - "fmla v1.4s, v18.4s, v20.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v3.4s, v6.4s, v21.4s\n" - "ldr s19, [%[wbptr]]\n" - "fmla v2.4s, v6.4s, v20.4s\n" - 
"ldr s12, [%[wbptr], #4]\n" - "fmla v1.4s, v6.4s, v23.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v6.4s, v13.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v3.4s, v25.4s, v15.4s\n" - "ldr s16, [%[inptr0]]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "ldr s22, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v25.4s, v23.4s\n" - "ldr s13, [%[wbptr], #16]\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "ldr s9, [%[inptr0], x23]\n" - "fmla v1.4s, v24.4s, v21.4s\n" - "add x21, x21, #4\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "ldr s28, [x21]\n" - "fmla v0.4s, v24.4s, v20.4s\n" - "ldr s11, [%[wbptr], #8]\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "ldr s8, [x21, %[input_col_stride1]]\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "add x27, x27, #4\n" - "fmla v2.4s, v27.4s, v21.4s\n" - "ldr s20, [%[wbptr], #28]\n" - "fmla v0.4s, v27.4s, v23.4s\n" - "ldr s24, [x27]\n" - "fmla v1.4s, v17.4s, v14.4s\n" - "ldr s6, [x27, %[input_col_stride1]]\n" - "fmla v2.4s, v7.4s, v15.4s\n" - "add x28, x28, #4\n" - "fmla v0.4s, v7.4s, v10.4s\n" - "ldr s23, [%[wbptr], #20]\n" - "movi v25.16b, #0\n" - "ldr s18, [x28]\n" - "fmla v2.4s, v5.4s, v14.4s\n" - "subs x25, x25, #1\n" - "fmla v0.4s, v17.4s, v21.4s\n" - "ldr s10, [%[wbptr], #12]\n" - "fmov v26.4s, #6.0\n" - "fmax v3.4s, v3.4s, v25.4s\n" - "fmax v2.4s, v2.4s, v25.4s\n" - "fmax v1.4s, v1.4s, v25.4s\n" - "fmla v0.4s, v5.4s, v15.4s\n" - "ldr s21, [%[wbptr], #32]\n" - "fmin v3.4s, v3.4s, v26.4s\n" - "fmin v2.4s, v2.4s, v26.4s\n" - "fmin v1.4s, v1.4s, v26.4s\n" - "str s3, [%[outptr0]]\n" - "str s2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v4.4s, v14.4s\n" - "str s1, [x24]\n" - "mov v3.16b, v19.16b\n" - "mov v1.16b, v19.16b\n" - "ldr s15, [%[wbptr], #24]\n" - "fmax v0.4s, v0.4s, v25.4s\n" - "ldr s14, [%[wbptr], #36]\n" - "mov v2.16b, v19.16b\n" - "add %[outptr0], %[outptr0], #4\n" - "fmin v0.4s, v0.4s, v26.4s\n" - "fmla v3.4s, v16.4s, v12.4s\n" - "fmla v1.4s, v28.4s, v12.4s\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "str s0, [x24, 
%[output_col_stride1]]\n" - "mov v0.16b, v19.16b\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "add x24, x24, #4\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "bne 5b\n" - "6:\n" - "fmla v3.4s, v24.4s, v20.4s\n" - "ldr s25, [x21, x23]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "ldr s28, [%[inptr0], x22]\n" - "fmla v2.4s, v8.4s, v13.4s\n" - "ldr s24, [x28, %[input_col_stride1]]\n" - "fmla v3.4s, v8.4s, v23.4s\n" - "ldr s27, [x27, x23]\n" - "fmla v1.4s, v8.4s, v11.4s\n" - "ldr s7, [x21, x22]\n" - "fmla v0.4s, v8.4s, v12.4s\n" - "ldr s17, [x28, x23]\n" - "fmla v3.4s, v9.4s, v10.4s\n" - "ldr s5, [x27, x22]\n" - "fmla v2.4s, v9.4s, v11.4s\n" - "ldr s4, [x28, x22]\n" - "fmla v1.4s, v18.4s, v20.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v3.4s, v6.4s, v21.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v2.4s, v6.4s, v20.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v1.4s, v6.4s, v23.4s\n" - "add x21, x21, #4\n" - "fmla v0.4s, v6.4s, v13.4s\n" - "add x27, x27, #4\n" - "fmla v3.4s, v25.4s, v15.4s\n" - "add x28, x28, #4\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "fmla v2.4s, v25.4s, v23.4s\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "movi v25.16b, #0\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "fmov v26.4s, #6.0\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "fmla v1.4s, v24.4s, v21.4s\n" - "fmla v0.4s, v24.4s, v20.4s\n" - "fmax v3.4s, v3.4s, v25.4s\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "fmla v2.4s, v27.4s, v21.4s\n" - "fmla v0.4s, v27.4s, v23.4s\n" - "fmin v3.4s, v3.4s, v26.4s\n" - "str s3, [%[outptr0]]\n" - "fmla v2.4s, v7.4s, v15.4s\n" - "fmla v0.4s, v7.4s, v10.4s\n" - "fmla v1.4s, v17.4s, v14.4s\n" - "fmla v2.4s, v5.4s, v14.4s\n" - "fmla v0.4s, v17.4s, v21.4s\n" - "fmax v1.4s, v1.4s, v25.4s\n" - "fmax v2.4s, v2.4s, v25.4s\n" - "fmla v0.4s, v5.4s, v15.4s\n" - "fmin v1.4s, v1.4s, v26.4s\n" - "fmin v2.4s, v2.4s, v26.4s\n" - "str s1, [x24]\n" - "str s2, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v4.4s, v14.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmax v0.4s, v0.4s, v25.4s\n" - "fmin v0.4s, 
v0.4s, v26.4s\n" - "str s0, [x24, %[output_col_stride1]]\n" - "add x24, x24, #4\n" - "7:\n" - : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr) - : [output_row_stride] "r" (output_row_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -#endif // __aarch64__ - -template class DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float, float>; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp deleted file mode 100644 index 2554436172..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_2x2_3x3_2x2_fp32_fp32.cpp +++ /dev/null @@ -1,2809 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "impl_fp32_fp32.hpp" - -namespace depthwise -{ - -using namespace neon_convolution_kernels; -using Conv = DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>; - -#ifdef __aarch64__ -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x23, %[inptr0], %[input_row_stride]\n" - "add x19, %[input_col_stride1], %[input_col_stride1]\n" - "add x22, %[outptr0], %[output_row_stride]\n" - "add x24, x23, %[input_row_stride]\n" - "add x20, x19, %[input_col_stride1]\n" - "and x27, %[n_channels], #3\n" - "add x25, x24, %[input_row_stride]\n" - "add x21, x20, %[input_col_stride1]\n" - "lsr x28, %[n_channels], #2\n" - "add x26, x25, %[input_row_stride]\n" - "cbz x28, 4f\n" - "1:\n" - "ldr q14, [%[wbptr]]\n" - "subs x28, x28, #1\n" - "mov v12.16b, v14.16b\n" - "ldr q8, [%[wbptr], #16]\n" - "mov v10.16b, v14.16b\n" - "ldr q7, [%[wbptr], #32]\n" - "mov v11.16b, v14.16b\n" - "ldr q6, [%[wbptr], #48]\n" - "mov v9.16b, v14.16b\n" - "ldr q5, [%[wbptr], #64]\n" - "ldr q4, [%[wbptr], #80]\n" - "ldr q3, [%[wbptr], #96]\n" - "ldr q2, [%[wbptr], #112]\n" - "ldr q1, [%[wbptr], #128]\n" - "ldr q0, [%[wbptr], #144]\n" - "ldr q15, [%[inptr0]]\n" - "fmla v12.4s, v15.4s, v8.4s\n" - "ldr q20, 
[x23]\n" - "ldr q13, [%[inptr0], %[input_col_stride1]]\n" - "ldr q17, [x24]\n" - "fmla v10.4s, v17.4s, v8.4s\n" - "ldr q16, [x23, %[input_col_stride1]]\n" - "fmla v12.4s, v20.4s, v5.4s\n" - "ldr q18, [%[inptr0], x19]\n" - "ldr q14, [x25]\n" - "ldr q15, [x24, %[input_col_stride1]]\n" - "fmla v12.4s, v13.4s, v7.4s\n" - "fmla v12.4s, v17.4s, v2.4s\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "beq 3f\n" - "2:\n" - "fmla v11.4s, v18.4s, v8.4s\n" - "ldr q19, [x23, x19]\n" - "fmla v10.4s, v14.4s, v5.4s\n" - "ldr q20, [%[inptr0], x20]\n" - "fmla v12.4s, v15.4s, v1.4s\n" - "ldr q14, [x26]\n" - "fmla v11.4s, v19.4s, v5.4s\n" - "ldr q13, [x25, %[input_col_stride1]]\n" - "fmla v10.4s, v15.4s, v7.4s\n" - "ldr q17, [x24, x19]\n" - "fmla v12.4s, v19.4s, v3.4s\n" - "ldr q19, [x23, x20]\n" - "fmla v11.4s, v20.4s, v7.4s\n" - "ldr q18, [%[inptr0], x21]\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "ldr q16, [x26, %[input_col_stride1]]\n" - "fmla v12.4s, v17.4s, v0.4s\n" - "ldr q14, [x25, x19]\n" - "fmla v11.4s, v17.4s, v2.4s\n" - "ldr q15, [x24, x20]\n" - "fmla v10.4s, v13.4s, v4.4s\n" - "ldr q13, [x23, x21]\n" - "str q12, [%[outptr0]]\n" - "fmla v9.4s, v17.4s, v8.4s\n" - "fmla v11.4s, v19.4s, v4.4s\n" - "ldr q12, [x26, x19]\n" - "fmla v10.4s, v17.4s, v6.4s\n" - "ldr q20, [x25, x20]\n" - "fmla v9.4s, v14.4s, v5.4s\n" - "ldr q17, [x24, x21]\n" - "fmla v11.4s, v18.4s, v6.4s\n" - "ldr q19, [x26, x20]\n" - "fmla v10.4s, v16.4s, v1.4s\n" - "ldr q18, [x25, x21]\n" - "fmla v9.4s, v15.4s, v7.4s\n" - "ldr q16, [x26, x21]\n" - "fmla v11.4s, v15.4s, v1.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "ldr q14, [%[wbptr]]\n" - "fmla v9.4s, v12.4s, v2.4s\n" - "ldr q8, [%[wbptr], #16]\n" - "fmla v11.4s, v13.4s, v3.4s\n" - "ldr q7, [%[wbptr], #32]\n" - "fmla v10.4s, v12.4s, v0.4s\n" - "ldr q5, [%[wbptr], #64]\n" - "fmla v9.4s, v20.4s, v4.4s\n" - "ldr q2, [%[wbptr], #112]\n" - "fmla v11.4s, v17.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "str 
q10, [x22]\n" - "mov v12.16b, v14.16b\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "ldr q4, [%[wbptr], #80]\n" - "str q11, [%[outptr0], %[output_col_stride1]]\n" - "mov v10.16b, v14.16b\n" - "mov v11.16b, v14.16b\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "ldr q6, [%[wbptr], #48]\n" - "ldr q15, [%[inptr0]]\n" - "add x23, x23, #16\n" - "fmla v12.4s, v15.4s, v8.4s\n" - "ldr q20, [x23]\n" - "fmla v9.4s, v18.4s, v3.4s\n" - "ldr q1, [%[wbptr], #128]\n" - "ldr q13, [%[inptr0], %[input_col_stride1]]\n" - "add x24, x24, #16\n" - "fmla v12.4s, v20.4s, v5.4s\n" - "ldr q17, [x24]\n" - "fmla v9.4s, v16.4s, v0.4s\n" - "ldr q3, [%[wbptr], #96]\n" - "fmla v10.4s, v17.4s, v8.4s\n" - "ldr q16, [x23, %[input_col_stride1]]\n" - "fmla v12.4s, v13.4s, v7.4s\n" - "ldr q18, [%[inptr0], x19]\n" - "str q9, [x22, %[output_col_stride1]]\n" - "add x25, x25, #16\n" - "mov v9.16b, v14.16b\n" - "ldr q0, [%[wbptr], #144]\n" - "fmla v12.4s, v17.4s, v2.4s\n" - "ldr q14, [x25]\n" - "ldr q15, [x24, %[input_col_stride1]]\n" - "add x26, x26, #16\n" - "add %[outptr0], %[outptr0], #16\n" - "add x22, x22, #16\n" - "subs x28, x28, #1\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "bne 2b\n" - "3:\n" - "fmla v11.4s, v18.4s, v8.4s\n" - "ldr q19, [x23, x19]\n" - "fmla v10.4s, v14.4s, v5.4s\n" - "ldr q20, [%[inptr0], x20]\n" - "fmla v12.4s, v15.4s, v1.4s\n" - "ldr q14, [x26]\n" - "fmla v11.4s, v19.4s, v5.4s\n" - "ldr q13, [x25, %[input_col_stride1]]\n" - "fmla v10.4s, v15.4s, v7.4s\n" - "ldr q17, [x24, x19]\n" - "fmla v12.4s, v19.4s, v3.4s\n" - "ldr q19, [x23, x20]\n" - "fmla v11.4s, v20.4s, v7.4s\n" - "ldr q18, [%[inptr0], x21]\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "ldr q16, [x26, %[input_col_stride1]]\n" - "fmla v12.4s, v17.4s, v0.4s\n" - "ldr q14, [x25, x19]\n" - "fmla v11.4s, v17.4s, v2.4s\n" - "ldr q15, [x24, x20]\n" - "fmla v10.4s, v13.4s, v4.4s\n" - "ldr q13, [x23, x21]\n" - "str q12, [%[outptr0]]\n" - "fmla v9.4s, v17.4s, v8.4s\n" - "fmla v11.4s, v19.4s, 
v4.4s\n" - "ldr q12, [x26, x19]\n" - "fmla v10.4s, v17.4s, v6.4s\n" - "ldr q20, [x25, x20]\n" - "fmla v9.4s, v14.4s, v5.4s\n" - "ldr q17, [x24, x21]\n" - "fmla v11.4s, v18.4s, v6.4s\n" - "ldr q19, [x26, x20]\n" - "fmla v10.4s, v16.4s, v1.4s\n" - "ldr q18, [x25, x21]\n" - "fmla v9.4s, v15.4s, v7.4s\n" - "ldr q16, [x26, x21]\n" - "fmla v11.4s, v15.4s, v1.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v9.4s, v12.4s, v2.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v11.4s, v13.4s, v3.4s\n" - "add x23, x23, #16\n" - "fmla v10.4s, v12.4s, v0.4s\n" - "add x24, x24, #16\n" - "fmla v9.4s, v20.4s, v4.4s\n" - "add x25, x25, #16\n" - "fmla v11.4s, v17.4s, v0.4s\n" - "add x26, x26, #16\n" - "str q10, [x22]\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "str q11, [%[outptr0], %[output_col_stride1]]\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "fmla v9.4s, v18.4s, v3.4s\n" - "fmla v9.4s, v16.4s, v0.4s\n" - "str q9, [x22, %[output_col_stride1]]\n" - "add x22, x22, #16\n" - "4:\n" - "cbz x27, 7f\n" - "ldr s14, [%[wbptr]]\n" - "mov v12.16b, v14.16b\n" - "ldr s8, [%[wbptr], #4]\n" - "mov v10.16b, v14.16b\n" - "ldr s7, [%[wbptr], #8]\n" - "mov v11.16b, v14.16b\n" - "ldr s6, [%[wbptr], #12]\n" - "mov v9.16b, v14.16b\n" - "ldr s5, [%[wbptr], #16]\n" - "ldr s4, [%[wbptr], #20]\n" - "subs x27, x27, #1\n" - "ldr s3, [%[wbptr], #24]\n" - "ldr s2, [%[wbptr], #28]\n" - "ldr s1, [%[wbptr], #32]\n" - "ldr s0, [%[wbptr], #36]\n" - "ldr s15, [%[inptr0]]\n" - "ldr s20, [x23]\n" - "fmla v12.4s, v15.4s, v8.4s\n" - "ldr s13, [%[inptr0], %[input_col_stride1]]\n" - "ldr s17, [x24]\n" - "ldr s16, [x23, %[input_col_stride1]]\n" - "fmla v10.4s, v17.4s, v8.4s\n" - "ldr s18, [%[inptr0], x19]\n" - "fmla v12.4s, v20.4s, v5.4s\n" - "ldr s14, [x25]\n" - "ldr s15, [x24, %[input_col_stride1]]\n" - "fmla v12.4s, v13.4s, v7.4s\n" - "fmla v12.4s, v17.4s, v2.4s\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "fmla v12.4s, v18.4s, 
v6.4s\n" - "beq 6f\n" - "5:\n" - "fmla v11.4s, v18.4s, v8.4s\n" - "ldr s19, [x23, x19]\n" - "fmla v10.4s, v14.4s, v5.4s\n" - "ldr s20, [%[inptr0], x20]\n" - "fmla v12.4s, v15.4s, v1.4s\n" - "ldr s14, [x26]\n" - "fmla v11.4s, v19.4s, v5.4s\n" - "ldr s13, [x25, %[input_col_stride1]]\n" - "fmla v10.4s, v15.4s, v7.4s\n" - "ldr s17, [x24, x19]\n" - "fmla v12.4s, v19.4s, v3.4s\n" - "ldr s19, [x23, x20]\n" - "fmla v11.4s, v20.4s, v7.4s\n" - "ldr s18, [%[inptr0], x21]\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "ldr s16, [x26, %[input_col_stride1]]\n" - "fmla v12.4s, v17.4s, v0.4s\n" - "ldr s14, [x25, x19]\n" - "fmla v11.4s, v17.4s, v2.4s\n" - "ldr s15, [x24, x20]\n" - "fmla v10.4s, v13.4s, v4.4s\n" - "ldr s13, [x23, x21]\n" - "str s12, [%[outptr0]]\n" - "fmla v9.4s, v17.4s, v8.4s\n" - "fmla v11.4s, v19.4s, v4.4s\n" - "ldr s12, [x26, x19]\n" - "fmla v10.4s, v17.4s, v6.4s\n" - "ldr s20, [x25, x20]\n" - "fmla v9.4s, v14.4s, v5.4s\n" - "ldr s17, [x24, x21]\n" - "fmla v11.4s, v18.4s, v6.4s\n" - "ldr s19, [x26, x20]\n" - "fmla v10.4s, v16.4s, v1.4s\n" - "ldr s18, [x25, x21]\n" - "fmla v9.4s, v15.4s, v7.4s\n" - "ldr s16, [x26, x21]\n" - "fmla v11.4s, v15.4s, v1.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "ldr s14, [%[wbptr]]\n" - "fmla v9.4s, v12.4s, v2.4s\n" - "ldr s8, [%[wbptr], #4]\n" - "fmla v11.4s, v13.4s, v3.4s\n" - "ldr s7, [%[wbptr], #8]\n" - "fmla v10.4s, v12.4s, v0.4s\n" - "ldr s5, [%[wbptr], #16]\n" - "fmla v9.4s, v20.4s, v4.4s\n" - "ldr s2, [%[wbptr], #28]\n" - "fmla v11.4s, v17.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "str s10, [x22]\n" - "mov v12.16b, v14.16b\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "ldr s4, [%[wbptr], #20]\n" - "str s11, [%[outptr0], %[output_col_stride1]]\n" - "mov v10.16b, v14.16b\n" - "mov v11.16b, v14.16b\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "ldr s6, [%[wbptr], #12]\n" - "ldr s15, [%[inptr0]]\n" - "add x23, x23, #4\n" - "fmla v12.4s, v15.4s, v8.4s\n" - "ldr s20, [x23]\n" - 
"fmla v9.4s, v18.4s, v3.4s\n" - "ldr s1, [%[wbptr], #32]\n" - "ldr s13, [%[inptr0], %[input_col_stride1]]\n" - "add x24, x24, #4\n" - "fmla v12.4s, v20.4s, v5.4s\n" - "ldr s17, [x24]\n" - "fmla v9.4s, v16.4s, v0.4s\n" - "ldr s3, [%[wbptr], #24]\n" - "fmla v10.4s, v17.4s, v8.4s\n" - "ldr s16, [x23, %[input_col_stride1]]\n" - "fmla v12.4s, v13.4s, v7.4s\n" - "ldr s18, [%[inptr0], x19]\n" - "str s9, [x22, %[output_col_stride1]]\n" - "add x25, x25, #4\n" - "mov v9.16b, v14.16b\n" - "ldr s0, [%[wbptr], #36]\n" - "fmla v12.4s, v17.4s, v2.4s\n" - "ldr s14, [x25]\n" - "ldr s15, [x24, %[input_col_stride1]]\n" - "add x26, x26, #4\n" - "add %[outptr0], %[outptr0], #4\n" - "add x22, x22, #4\n" - "subs x27, x27, #1\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "bne 5b\n" - "6:\n" - "fmla v11.4s, v18.4s, v8.4s\n" - "ldr s19, [x23, x19]\n" - "fmla v10.4s, v14.4s, v5.4s\n" - "ldr s20, [%[inptr0], x20]\n" - "fmla v12.4s, v15.4s, v1.4s\n" - "ldr s14, [x26]\n" - "fmla v11.4s, v19.4s, v5.4s\n" - "ldr s13, [x25, %[input_col_stride1]]\n" - "fmla v10.4s, v15.4s, v7.4s\n" - "ldr s17, [x24, x19]\n" - "fmla v12.4s, v19.4s, v3.4s\n" - "ldr s19, [x23, x20]\n" - "fmla v11.4s, v20.4s, v7.4s\n" - "ldr s18, [%[inptr0], x21]\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "ldr s16, [x26, %[input_col_stride1]]\n" - "fmla v12.4s, v17.4s, v0.4s\n" - "ldr s14, [x25, x19]\n" - "fmla v11.4s, v17.4s, v2.4s\n" - "ldr s15, [x24, x20]\n" - "fmla v10.4s, v13.4s, v4.4s\n" - "ldr s13, [x23, x21]\n" - "str s12, [%[outptr0]]\n" - "fmla v9.4s, v17.4s, v8.4s\n" - "fmla v11.4s, v19.4s, v4.4s\n" - "ldr s12, [x26, x19]\n" - "fmla v10.4s, v17.4s, v6.4s\n" - "ldr s20, [x25, x20]\n" - "fmla v9.4s, v14.4s, v5.4s\n" - "ldr s17, [x24, x21]\n" - "fmla v11.4s, v18.4s, v6.4s\n" - "ldr s19, [x26, x20]\n" - "fmla v10.4s, v16.4s, v1.4s\n" - "ldr s18, [x25, x21]\n" - "fmla v9.4s, v15.4s, v7.4s\n" - "ldr s16, [x26, x21]\n" - "fmla v11.4s, v15.4s, v1.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v10.4s, v14.4s, 
v3.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v9.4s, v12.4s, v2.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v11.4s, v13.4s, v3.4s\n" - "add x23, x23, #4\n" - "fmla v10.4s, v12.4s, v0.4s\n" - "add x24, x24, #4\n" - "fmla v9.4s, v20.4s, v4.4s\n" - "add x25, x25, #4\n" - "fmla v11.4s, v17.4s, v0.4s\n" - "add x26, x26, #4\n" - "str s10, [x22]\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "str s11, [%[outptr0], %[output_col_stride1]]\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "fmla v9.4s, v18.4s, v3.4s\n" - "fmla v9.4s, v16.4s, v0.4s\n" - "str s9, [x22, %[output_col_stride1]]\n" - "add x22, x22, #4\n" - "7:\n" - : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr) - : [n_channels] "r" ((long) n_channels), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void *weight_bias_ptr, - const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float *outptrs[Base::output_tile_rows][Base::output_tile_cols] -) -{ - __asm __volatile( - "mov x23, xzr\n" - "mov x24, xzr\n" - "and x25, %[n_channels], #3\n" - "lsr x26, %[n_channels], #2\n" - "cbz x26, 4f\n" - "1:\n" - "ldr q13, [%[wbptr]]\n" - "ldr x19, [%[inptrs], 0]\n" - "mov v10.16b, v13.16b\n" - "ldr q12, [%[wbptr], #16]\n" - "mov v8.16b, v13.16b\n" - "ldr q6, [%[wbptr], #32]\n" - "mov v9.16b, v13.16b\n" - "ldr q5, [%[wbptr], #48]\n" - "mov v7.16b, v13.16b\n" - "ldr q11, [%[wbptr], #64]\n" - "ldr q4, [%[wbptr], #80]\n" - "ldr x20, 
[%[inptrs], 40]\n" - "ldr q3, [%[wbptr], #96]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr q2, [%[wbptr], #112]\n" - "ldr x27, [%[inptrs], 120]\n" - "ldr q1, [%[wbptr], #128]\n" - "subs x26, x26, #1\n" - "ldr q0, [%[wbptr], #144]\n" - "ldr q14, [x19, x23]\n" - "fmla v10.4s, v14.4s, v12.4s\n" - "ldr q18, [x20, x23]\n" - "ldr q14, [x21, x23]\n" - "ldr x19, [%[inptrs], 8]\n" - "ldr q16, [x27, x23]\n" - "ldr x20, [%[inptrs], 48]\n" - "ldr q19, [x19, x23]\n" - "ldr x21, [%[inptrs], 88]\n" - "fmla v10.4s, v18.4s, v11.4s\n" - "ldr q15, [x20, x23]\n" - "ldr q18, [x21, x23]\n" - "ldr x19, [%[inptrs], 16]\n" - "ldr q13, [x19, x23]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "beq 3f\n" - "2:\n" - "fmla v8.4s, v14.4s, v12.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v10.4s, v15.4s, v4.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v9.4s, v13.4s, v12.4s\n" - "ldr q14, [x20, x23]\n" - "ldr q17, [x19, x23]\n" - "ldr x22, [%[inptrs], 160]\n" - "fmla v8.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 128]\n" - "fmla v10.4s, v13.4s, v5.4s\n" - "ldr q15, [x22, x23]\n" - "fmla v9.4s, v14.4s, v11.4s\n" - "ldr q19, [x27, x23]\n" - "ldr x21, [%[inptrs], 96]\n" - "ldr x20, [%[inptrs], 64]\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v8.4s, v18.4s, v6.4s\n" - "ldr x22, [%[inptrs], 168]\n" - "fmla v10.4s, v18.4s, v1.4s\n" - "ldr q13, [x21, x23]\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "ldr q18, [x20, x23]\n" - "fmla v7.4s, v13.4s, v12.4s\n" - "ldr q17, [x19, x23]\n" - "fmla v8.4s, v15.4s, v2.4s\n" - "ldr q15, [x22, x23]\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "ldr x27, [%[inptrs], 136]\n" - "fmla v9.4s, v13.4s, v2.4s\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr q16, [x27, x23]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v8.4s, v19.4s, v4.4s\n" - "ldr q19, [x21, x23]\n" - "fmla v10.4s, v13.4s, v0.4s\n" - "ldr q12, [x20, x23]\n" - "fmla v9.4s, v18.4s, v4.4s\n" - "ldr x22, [%[inptrs], 176]\n" - "fmla v7.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 144]\n" - "fmla v8.4s, v13.4s, v5.4s\n" 
- "ldr q11, [x22, x23]\n" - "ldr q13, [x27, x23]\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v9.4s, v17.4s, v5.4s\n" - "ldr x22, [%[inptrs], 184]\n" - "fmla v7.4s, v19.4s, v6.4s\n" - "ldr q14, [x21, x23]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr q17, [x22, x23]\n" - "ldr x27, [%[inptrs], 152]\n" - "ldr x22, [%[inptrs], 192]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str q10, [x21, x24]\n" - "fmla v7.4s, v11.4s, v2.4s\n" - "fmla v8.4s, v16.4s, v3.4s\n" - "ldr q16, [x27, x23]\n" - "ldr q15, [x22, x23]\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v9.4s, v12.4s, v3.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v7.4s, v13.4s, v4.4s\n" - "ldr q13, [%[wbptr]]\n" - "fmla v8.4s, v11.4s, v0.4s\n" - "ldr q12, [%[wbptr], #16]\n" - "mov v10.16b, v13.16b\n" - "ldr q6, [%[wbptr], #32]\n" - "fmla v9.4s, v14.4s, v0.4s\n" - "ldr q11, [%[wbptr], #64]\n" - "fmla v7.4s, v14.4s, v5.4s\n" - "ldr q4, [%[wbptr], #80]\n" - "str q8, [x28, x24]\n" - "add x23, x23, #16\n" - "mov v8.16b, v13.16b\n" - "ldr q2, [%[wbptr], #112]\n" - "str q9, [x21, x24]\n" - "ldr x28, [%[outptrs], 24]\n" - "fmla v7.4s, v17.4s, v1.4s\n" - "ldr q5, [%[wbptr], #48]\n" - "mov v9.16b, v13.16b\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "ldr x19, [%[inptrs], 0]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr x27, [%[inptrs], 120]\n" - "subs x26, x26, #1\n" - "fmla v7.4s, v16.4s, v3.4s\n" - "ldr q1, [%[wbptr], #128]\n" - "ldr q14, [x19, x23]\n" - "fmla v10.4s, v14.4s, v12.4s\n" - "ldr q18, [x20, x23]\n" - "ldr q14, [x21, x23]\n" - "ldr x19, [%[inptrs], 8]\n" - "fmla v7.4s, v15.4s, v0.4s\n" - "ldr q3, [%[wbptr], #96]\n" - "ldr q19, [x19, x23]\n" - "ldr x20, [%[inptrs], 48]\n" - "fmla v10.4s, v18.4s, v11.4s\n" - "ldr q16, [x27, x23]\n" - "ldr q15, [x20, x23]\n" - "ldr x19, [%[inptrs], 16]\n" - "str q7, [x28, x24]\n" - "ldr x21, [%[inptrs], 88]\n" - "mov v7.16b, v13.16b\n" - "ldr q0, [%[wbptr], #144]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "ldr 
q13, [x19, x23]\n" - "ldr q18, [x21, x23]\n" - "add x24, x24, #16\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "bne 2b\n" - "3:\n" - "fmla v8.4s, v14.4s, v12.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v10.4s, v15.4s, v4.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v9.4s, v13.4s, v12.4s\n" - "ldr q14, [x20, x23]\n" - "ldr q17, [x19, x23]\n" - "ldr x22, [%[inptrs], 160]\n" - "fmla v8.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 128]\n" - "fmla v10.4s, v13.4s, v5.4s\n" - "ldr q15, [x22, x23]\n" - "fmla v9.4s, v14.4s, v11.4s\n" - "ldr q19, [x27, x23]\n" - "ldr x21, [%[inptrs], 96]\n" - "ldr x20, [%[inptrs], 64]\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v8.4s, v18.4s, v6.4s\n" - "ldr x22, [%[inptrs], 168]\n" - "fmla v10.4s, v18.4s, v1.4s\n" - "ldr q13, [x21, x23]\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "ldr q18, [x20, x23]\n" - "fmla v7.4s, v13.4s, v12.4s\n" - "ldr q17, [x19, x23]\n" - "fmla v8.4s, v15.4s, v2.4s\n" - "ldr q15, [x22, x23]\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "ldr x27, [%[inptrs], 136]\n" - "fmla v9.4s, v13.4s, v2.4s\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr q16, [x27, x23]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v8.4s, v19.4s, v4.4s\n" - "ldr q19, [x21, x23]\n" - "fmla v10.4s, v13.4s, v0.4s\n" - "ldr q12, [x20, x23]\n" - "fmla v9.4s, v18.4s, v4.4s\n" - "ldr x22, [%[inptrs], 176]\n" - "fmla v7.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 144]\n" - "fmla v8.4s, v13.4s, v5.4s\n" - "ldr q11, [x22, x23]\n" - "ldr q13, [x27, x23]\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v9.4s, v17.4s, v5.4s\n" - "ldr x22, [%[inptrs], 184]\n" - "fmla v7.4s, v19.4s, v6.4s\n" - "ldr q14, [x21, x23]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr q17, [x22, x23]\n" - "ldr x27, [%[inptrs], 152]\n" - "ldr x22, [%[inptrs], 192]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str q10, [x21, x24]\n" - "fmla v7.4s, v11.4s, v2.4s\n" - "fmla v8.4s, v16.4s, v3.4s\n" - "ldr q16, [x27, x23]\n" - "ldr q15, [x22, x23]\n" - "ldr x21, [%[outptrs], 
8]\n" - "fmla v9.4s, v12.4s, v3.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v7.4s, v13.4s, v4.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v8.4s, v11.4s, v0.4s\n" - "add x23, x23, #16\n" - "fmla v9.4s, v14.4s, v0.4s\n" - "fmla v7.4s, v14.4s, v5.4s\n" - "str q8, [x28, x24]\n" - "ldr x28, [%[outptrs], 24]\n" - "str q9, [x21, x24]\n" - "fmla v7.4s, v17.4s, v1.4s\n" - "fmla v7.4s, v16.4s, v3.4s\n" - "fmla v7.4s, v15.4s, v0.4s\n" - "str q7, [x28, x24]\n" - "add x24, x24, #16\n" - "4:\n" - "cbz x25, 7f\n" - "ldr s13, [%[wbptr]]\n" - "mov v10.16b, v13.16b\n" - "ldr s12, [%[wbptr], #4]\n" - "mov v8.16b, v13.16b\n" - "ldr s6, [%[wbptr], #8]\n" - "mov v9.16b, v13.16b\n" - "ldr s5, [%[wbptr], #12]\n" - "mov v7.16b, v13.16b\n" - "ldr s11, [%[wbptr], #16]\n" - "ldr s4, [%[wbptr], #20]\n" - "ldr x19, [%[inptrs], 0]\n" - "ldr s3, [%[wbptr], #24]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr s2, [%[wbptr], #28]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr s1, [%[wbptr], #32]\n" - "ldr x27, [%[inptrs], 120]\n" - "ldr s0, [%[wbptr], #36]\n" - "subs x25, x25, #1\n" - "ldr s14, [x19, x23]\n" - "ldr s18, [x20, x23]\n" - "fmla v10.4s, v14.4s, v12.4s\n" - "ldr s14, [x21, x23]\n" - "ldr s16, [x27, x23]\n" - "ldr x19, [%[inptrs], 8]\n" - "ldr x20, [%[inptrs], 48]\n" - "ldr x21, [%[inptrs], 88]\n" - "ldr s19, [x19, x23]\n" - "fmla v10.4s, v18.4s, v11.4s\n" - "ldr s15, [x20, x23]\n" - "ldr s18, [x21, x23]\n" - "ldr x19, [%[inptrs], 16]\n" - "ldr s13, [x19, x23]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "beq 6f\n" - "5:\n" - "fmla v8.4s, v14.4s, v12.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v10.4s, v15.4s, v4.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v9.4s, v13.4s, v12.4s\n" - "ldr s14, [x20, x23]\n" - "ldr s17, [x19, x23]\n" - "ldr x22, [%[inptrs], 160]\n" - "fmla v8.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 128]\n" - "fmla v10.4s, v13.4s, v5.4s\n" - "ldr s15, [x22, x23]\n" - "fmla v9.4s, v14.4s, v11.4s\n" - "ldr s19, [x27, x23]\n" - "ldr x21, 
[%[inptrs], 96]\n" - "ldr x20, [%[inptrs], 64]\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v8.4s, v18.4s, v6.4s\n" - "ldr x22, [%[inptrs], 168]\n" - "fmla v10.4s, v18.4s, v1.4s\n" - "ldr s13, [x21, x23]\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "ldr s18, [x20, x23]\n" - "fmla v7.4s, v13.4s, v12.4s\n" - "ldr s17, [x19, x23]\n" - "fmla v8.4s, v15.4s, v2.4s\n" - "ldr s15, [x22, x23]\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "ldr x27, [%[inptrs], 136]\n" - "fmla v9.4s, v13.4s, v2.4s\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr s16, [x27, x23]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v8.4s, v19.4s, v4.4s\n" - "ldr s19, [x21, x23]\n" - "fmla v10.4s, v13.4s, v0.4s\n" - "ldr s12, [x20, x23]\n" - "fmla v9.4s, v18.4s, v4.4s\n" - "ldr x22, [%[inptrs], 176]\n" - "fmla v7.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 144]\n" - "fmla v8.4s, v13.4s, v5.4s\n" - "ldr s11, [x22, x23]\n" - "ldr s13, [x27, x23]\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v9.4s, v17.4s, v5.4s\n" - "ldr x22, [%[inptrs], 184]\n" - "fmla v7.4s, v19.4s, v6.4s\n" - "ldr s14, [x21, x23]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr s17, [x22, x23]\n" - "ldr x27, [%[inptrs], 152]\n" - "ldr x22, [%[inptrs], 192]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str s10, [x21, x24]\n" - "fmla v7.4s, v11.4s, v2.4s\n" - "fmla v8.4s, v16.4s, v3.4s\n" - "ldr s16, [x27, x23]\n" - "ldr s15, [x22, x23]\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v9.4s, v12.4s, v3.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v7.4s, v13.4s, v4.4s\n" - "ldr s13, [%[wbptr]]\n" - "fmla v8.4s, v11.4s, v0.4s\n" - "ldr s12, [%[wbptr], #4]\n" - "mov v10.16b, v13.16b\n" - "ldr s6, [%[wbptr], #8]\n" - "fmla v9.4s, v14.4s, v0.4s\n" - "ldr s11, [%[wbptr], #16]\n" - "fmla v7.4s, v14.4s, v5.4s\n" - "ldr s4, [%[wbptr], #20]\n" - "str s8, [x28, x24]\n" - "add x23, x23, #4\n" - "mov v8.16b, v13.16b\n" - "ldr s2, [%[wbptr], #28]\n" - "str s9, [x21, x24]\n" - "ldr x28, [%[outptrs], 24]\n" - "fmla v7.4s, v17.4s, v1.4s\n" 
- "ldr s5, [%[wbptr], #12]\n" - "mov v9.16b, v13.16b\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "ldr x19, [%[inptrs], 0]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr x27, [%[inptrs], 120]\n" - "subs x25, x25, #1\n" - "fmla v7.4s, v16.4s, v3.4s\n" - "ldr s1, [%[wbptr], #32]\n" - "ldr s14, [x19, x23]\n" - "fmla v10.4s, v14.4s, v12.4s\n" - "ldr s18, [x20, x23]\n" - "ldr s14, [x21, x23]\n" - "ldr x19, [%[inptrs], 8]\n" - "fmla v7.4s, v15.4s, v0.4s\n" - "ldr s3, [%[wbptr], #24]\n" - "ldr s19, [x19, x23]\n" - "ldr x20, [%[inptrs], 48]\n" - "fmla v10.4s, v18.4s, v11.4s\n" - "ldr s16, [x27, x23]\n" - "ldr s15, [x20, x23]\n" - "ldr x19, [%[inptrs], 16]\n" - "str s7, [x28, x24]\n" - "ldr x21, [%[inptrs], 88]\n" - "mov v7.16b, v13.16b\n" - "ldr s0, [%[wbptr], #36]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "ldr s13, [x19, x23]\n" - "ldr s18, [x21, x23]\n" - "add x24, x24, #4\n" - "fmla v10.4s, v14.4s, v2.4s\n" - "bne 5b\n" - "6:\n" - "fmla v8.4s, v14.4s, v12.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v10.4s, v15.4s, v4.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v9.4s, v13.4s, v12.4s\n" - "ldr s14, [x20, x23]\n" - "ldr s17, [x19, x23]\n" - "ldr x22, [%[inptrs], 160]\n" - "fmla v8.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 128]\n" - "fmla v10.4s, v13.4s, v5.4s\n" - "ldr s15, [x22, x23]\n" - "fmla v9.4s, v14.4s, v11.4s\n" - "ldr s19, [x27, x23]\n" - "ldr x21, [%[inptrs], 96]\n" - "ldr x20, [%[inptrs], 64]\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v8.4s, v18.4s, v6.4s\n" - "ldr x22, [%[inptrs], 168]\n" - "fmla v10.4s, v18.4s, v1.4s\n" - "ldr s13, [x21, x23]\n" - "fmla v9.4s, v17.4s, v6.4s\n" - "ldr s18, [x20, x23]\n" - "fmla v7.4s, v13.4s, v12.4s\n" - "ldr s17, [x19, x23]\n" - "fmla v8.4s, v15.4s, v2.4s\n" - "ldr s15, [x22, x23]\n" - "fmla v10.4s, v14.4s, v3.4s\n" - "ldr x27, [%[inptrs], 136]\n" - "fmla v9.4s, v13.4s, v2.4s\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr s16, [x27, x23]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v8.4s, v19.4s, v4.4s\n" - 
"ldr s19, [x21, x23]\n" - "fmla v10.4s, v13.4s, v0.4s\n" - "ldr s12, [x20, x23]\n" - "fmla v9.4s, v18.4s, v4.4s\n" - "ldr x22, [%[inptrs], 176]\n" - "fmla v7.4s, v16.4s, v11.4s\n" - "ldr x27, [%[inptrs], 144]\n" - "fmla v8.4s, v13.4s, v5.4s\n" - "ldr s11, [x22, x23]\n" - "ldr s13, [x27, x23]\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v9.4s, v17.4s, v5.4s\n" - "ldr x22, [%[inptrs], 184]\n" - "fmla v7.4s, v19.4s, v6.4s\n" - "ldr s14, [x21, x23]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr s17, [x22, x23]\n" - "ldr x27, [%[inptrs], 152]\n" - "ldr x22, [%[inptrs], 192]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str s10, [x21, x24]\n" - "fmla v7.4s, v11.4s, v2.4s\n" - "fmla v8.4s, v16.4s, v3.4s\n" - "ldr s16, [x27, x23]\n" - "ldr s15, [x22, x23]\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v9.4s, v12.4s, v3.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v7.4s, v13.4s, v4.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v8.4s, v11.4s, v0.4s\n" - "add x23, x23, #4\n" - "fmla v9.4s, v14.4s, v0.4s\n" - "fmla v7.4s, v14.4s, v5.4s\n" - "str s8, [x28, x24]\n" - "ldr x28, [%[outptrs], 24]\n" - "str s9, [x21, x24]\n" - "fmla v7.4s, v17.4s, v1.4s\n" - "fmla v7.4s, v16.4s, v3.4s\n" - "fmla v7.4s, v15.4s, v0.4s\n" - "str s7, [x28, x24]\n" - "add x24, x24, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr) - : [inptrs] "r" (inptrs), [n_channels] "r" ((long) n_channels), [outptrs] "r" (outptrs) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int 
output_col_stride -) -{ - __asm __volatile( - "add x24, %[inptr0], %[input_row_stride]\n" - "add x27, %[input_col_stride1], %[input_col_stride1]\n" - "add x19, %[outptr0], %[output_row_stride]\n" - "add x25, x24, %[input_row_stride]\n" - "add x23, x27, %[input_col_stride1]\n" - "and x20, %[n_channels], #3\n" - "add x28, x25, %[input_row_stride]\n" - "add x22, x23, %[input_col_stride1]\n" - "lsr x21, %[n_channels], #2\n" - "add x26, x28, %[input_row_stride]\n" - "cbz x21, 4f\n" - "1:\n" - "ldr q16, [%[wbptr]]\n" - "subs x21, x21, #1\n" - "mov v3.16b, v16.16b\n" - "ldr q4, [%[wbptr], #16]\n" - "mov v1.16b, v16.16b\n" - "ldr q5, [%[wbptr], #32]\n" - "mov v2.16b, v16.16b\n" - "ldr q12, [%[wbptr], #48]\n" - "mov v0.16b, v16.16b\n" - "ldr q11, [%[wbptr], #64]\n" - "ldr q10, [%[wbptr], #80]\n" - "ldr q6, [%[wbptr], #96]\n" - "ldr q9, [%[wbptr], #112]\n" - "ldr q8, [%[wbptr], #128]\n" - "ldr q7, [%[wbptr], #144]\n" - "ldr q21, [%[inptr0]]\n" - "fmla v3.4s, v21.4s, v4.4s\n" - "ldr q23, [x24]\n" - "ldr q19, [%[inptr0], %[input_col_stride1]]\n" - "ldr q14, [x25]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "ldr q13, [x24, %[input_col_stride1]]\n" - "fmla v3.4s, v23.4s, v11.4s\n" - "ldr q18, [%[inptr0], x27]\n" - "ldr q15, [x28]\n" - "ldr q22, [x25, %[input_col_stride1]]\n" - "fmla v3.4s, v19.4s, v5.4s\n" - "fmla v3.4s, v14.4s, v9.4s\n" - "beq 3f\n" - "2:\n" - "fmla v3.4s, v13.4s, v10.4s\n" - "ldr q17, [x24, x27]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr q20, [%[inptr0], x23]\n" - "fmla v1.4s, v15.4s, v11.4s\n" - "ldr q19, [x26]\n" - "fmla v3.4s, v18.4s, v12.4s\n" - "ldr q13, [x28, %[input_col_stride1]]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr q14, [x25, x27]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "ldr q15, [x24, x23]\n" - "fmla v3.4s, v22.4s, v8.4s\n" - "ldr q16, [%[inptr0], x22]\n" - "fmla v2.4s, v20.4s, v5.4s\n" - "ldr q20, [x26, %[input_col_stride1]]\n" - "fmla v1.4s, v19.4s, v9.4s\n" - "ldr q19, [x28, x27]\n" - "fmla v3.4s, v17.4s, v6.4s\n" - "ldr q21, [x25, x23]\n" - "fmla 
v2.4s, v14.4s, v9.4s\n" - "ldr q22, [x24, x22]\n" - "fmla v1.4s, v13.4s, v10.4s\n" - "ldr q23, [x26, x27]\n" - "fmla v3.4s, v14.4s, v7.4s\n" - "ldr q18, [x28, x23]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "ldr q13, [x25, x22]\n" - "fmla v1.4s, v14.4s, v12.4s\n" - "ldr q14, [x26, x23]\n" - "fmla v2.4s, v15.4s, v10.4s\n" - "ldr q17, [x28, x22]\n" - "fmla v0.4s, v19.4s, v11.4s\n" - "ldr q15, [x26, x22]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "ldr q16, [%[wbptr]]\n" - "fmla v0.4s, v21.4s, v5.4s\n" - "ldr q4, [%[wbptr], #16]\n" - "fmla v1.4s, v19.4s, v6.4s\n" - "ldr q11, [%[wbptr], #64]\n" - "fmla v2.4s, v21.4s, v8.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "ldr q5, [%[wbptr], #32]\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v2.4s, v22.4s, v6.4s\n" - "ldr q21, [%[inptr0]]\n" - "fmla v0.4s, v18.4s, v10.4s\n" - "ldr q9, [%[wbptr], #112]\n" - "movi v20.16b, #0\n" - "ldr q19, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v13.4s, v7.4s\n" - "ldr q18, [%[inptr0], x27]\n" - "fmla v0.4s, v13.4s, v12.4s\n" - "ldr q10, [%[wbptr], #80]\n" - "fmax v3.4s, v3.4s, v20.4s\n" - "add x24, x24, #16\n" - "fmax v2.4s, v2.4s, v20.4s\n" - "ldr q23, [x24]\n" - "str q3, [%[outptr0]]\n" - "fmla v0.4s, v14.4s, v8.4s\n" - "str q2, [%[outptr0], %[output_col_stride1]]\n" - "fmax v1.4s, v1.4s, v20.4s\n" - "mov v3.16b, v16.16b\n" - "ldr q12, [%[wbptr], #48]\n" - "str q1, [x19]\n" - "fmla v0.4s, v17.4s, v6.4s\n" - "mov v1.16b, v16.16b\n" - "ldr q8, [%[wbptr], #128]\n" - "mov v2.16b, v16.16b\n" - "ldr q13, [x24, %[input_col_stride1]]\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "ldr q6, [%[wbptr], #96]\n" - "fmla v3.4s, v21.4s, v4.4s\n" - "add x25, x25, #16\n" - "ldr q14, [x25]\n" - "add x28, x28, #16\n" - "fmax v0.4s, v0.4s, v20.4s\n" - "ldr q7, [%[wbptr], #144]\n" - "fmla v3.4s, v23.4s, v11.4s\n" - "ldr q15, [x28]\n" - "str q0, [x19, %[output_col_stride1]]\n" - "fmla v1.4s, 
v14.4s, v4.4s\n" - "mov v0.16b, v16.16b\n" - "ldr q22, [x25, %[input_col_stride1]]\n" - "fmla v3.4s, v19.4s, v5.4s\n" - "add x26, x26, #16\n" - "add %[outptr0], %[outptr0], #16\n" - "add x19, x19, #16\n" - "subs x21, x21, #1\n" - "fmla v3.4s, v14.4s, v9.4s\n" - "bne 2b\n" - "3:\n" - "fmla v3.4s, v13.4s, v10.4s\n" - "ldr q17, [x24, x27]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr q20, [%[inptr0], x23]\n" - "fmla v1.4s, v15.4s, v11.4s\n" - "ldr q19, [x26]\n" - "fmla v3.4s, v18.4s, v12.4s\n" - "ldr q13, [x28, %[input_col_stride1]]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr q14, [x25, x27]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "ldr q15, [x24, x23]\n" - "fmla v3.4s, v22.4s, v8.4s\n" - "ldr q16, [%[inptr0], x22]\n" - "fmla v2.4s, v20.4s, v5.4s\n" - "ldr q20, [x26, %[input_col_stride1]]\n" - "fmla v1.4s, v19.4s, v9.4s\n" - "ldr q19, [x28, x27]\n" - "fmla v3.4s, v17.4s, v6.4s\n" - "ldr q21, [x25, x23]\n" - "fmla v2.4s, v14.4s, v9.4s\n" - "ldr q22, [x24, x22]\n" - "fmla v1.4s, v13.4s, v10.4s\n" - "ldr q23, [x26, x27]\n" - "fmla v3.4s, v14.4s, v7.4s\n" - "ldr q18, [x28, x23]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "ldr q13, [x25, x22]\n" - "fmla v1.4s, v14.4s, v12.4s\n" - "ldr q14, [x26, x23]\n" - "fmla v2.4s, v15.4s, v10.4s\n" - "ldr q17, [x28, x22]\n" - "fmla v0.4s, v19.4s, v11.4s\n" - "ldr q15, [x26, x22]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v21.4s, v5.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v1.4s, v19.4s, v6.4s\n" - "add x24, x24, #16\n" - "fmla v2.4s, v21.4s, v8.4s\n" - "add x25, x25, #16\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "add x28, x28, #16\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "add x26, x26, #16\n" - "fmla v2.4s, v22.4s, v6.4s\n" - "movi v20.16b, #0\n" - "fmla v0.4s, v18.4s, v10.4s\n" - "fmax v3.4s, v3.4s, v20.4s\n" - "fmla v2.4s, v13.4s, v7.4s\n" - "fmax v1.4s, v1.4s, v20.4s\n" - "str q3, [%[outptr0]]\n" - "fmla v0.4s, v13.4s, v12.4s\n" - "str 
q1, [x19]\n" - "fmax v2.4s, v2.4s, v20.4s\n" - "fmla v0.4s, v14.4s, v8.4s\n" - "str q2, [%[outptr0], %[output_col_stride1]]\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v0.4s, v17.4s, v6.4s\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "fmax v0.4s, v0.4s, v20.4s\n" - "str q0, [x19, %[output_col_stride1]]\n" - "add x19, x19, #16\n" - "4:\n" - "cbz x20, 7f\n" - "ldr s16, [%[wbptr]]\n" - "mov v3.16b, v16.16b\n" - "ldr s4, [%[wbptr], #4]\n" - "mov v1.16b, v16.16b\n" - "ldr s5, [%[wbptr], #8]\n" - "mov v2.16b, v16.16b\n" - "ldr s12, [%[wbptr], #12]\n" - "mov v0.16b, v16.16b\n" - "ldr s11, [%[wbptr], #16]\n" - "ldr s10, [%[wbptr], #20]\n" - "subs x20, x20, #1\n" - "ldr s6, [%[wbptr], #24]\n" - "ldr s9, [%[wbptr], #28]\n" - "ldr s8, [%[wbptr], #32]\n" - "ldr s7, [%[wbptr], #36]\n" - "ldr s21, [%[inptr0]]\n" - "ldr s23, [x24]\n" - "fmla v3.4s, v21.4s, v4.4s\n" - "ldr s19, [%[inptr0], %[input_col_stride1]]\n" - "ldr s14, [x25]\n" - "ldr s13, [x24, %[input_col_stride1]]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "ldr s18, [%[inptr0], x27]\n" - "fmla v3.4s, v23.4s, v11.4s\n" - "ldr s15, [x28]\n" - "ldr s22, [x25, %[input_col_stride1]]\n" - "fmla v3.4s, v19.4s, v5.4s\n" - "fmla v3.4s, v14.4s, v9.4s\n" - "beq 6f\n" - "5:\n" - "fmla v3.4s, v13.4s, v10.4s\n" - "ldr s17, [x24, x27]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr s20, [%[inptr0], x23]\n" - "fmla v1.4s, v15.4s, v11.4s\n" - "ldr s19, [x26]\n" - "fmla v3.4s, v18.4s, v12.4s\n" - "ldr s13, [x28, %[input_col_stride1]]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr s14, [x25, x27]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "ldr s15, [x24, x23]\n" - "fmla v3.4s, v22.4s, v8.4s\n" - "ldr s16, [%[inptr0], x22]\n" - "fmla v2.4s, v20.4s, v5.4s\n" - "ldr s20, [x26, %[input_col_stride1]]\n" - "fmla v1.4s, v19.4s, v9.4s\n" - "ldr s19, [x28, x27]\n" - "fmla v3.4s, v17.4s, v6.4s\n" - "ldr s21, [x25, x23]\n" - "fmla v2.4s, v14.4s, v9.4s\n" - "ldr s22, [x24, x22]\n" - "fmla v1.4s, v13.4s, v10.4s\n" - "ldr s23, [x26, x27]\n" - "fmla v3.4s, v14.4s, v7.4s\n" - 
"ldr s18, [x28, x23]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "ldr s13, [x25, x22]\n" - "fmla v1.4s, v14.4s, v12.4s\n" - "ldr s14, [x26, x23]\n" - "fmla v2.4s, v15.4s, v10.4s\n" - "ldr s17, [x28, x22]\n" - "fmla v0.4s, v19.4s, v11.4s\n" - "ldr s15, [x26, x22]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "ldr s16, [%[wbptr]]\n" - "fmla v0.4s, v21.4s, v5.4s\n" - "ldr s4, [%[wbptr], #4]\n" - "fmla v1.4s, v19.4s, v6.4s\n" - "ldr s11, [%[wbptr], #16]\n" - "fmla v2.4s, v21.4s, v8.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "ldr s5, [%[wbptr], #8]\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v2.4s, v22.4s, v6.4s\n" - "ldr s21, [%[inptr0]]\n" - "fmla v0.4s, v18.4s, v10.4s\n" - "ldr s9, [%[wbptr], #28]\n" - "movi v20.16b, #0\n" - "ldr s19, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v13.4s, v7.4s\n" - "ldr s18, [%[inptr0], x27]\n" - "fmla v0.4s, v13.4s, v12.4s\n" - "ldr s10, [%[wbptr], #20]\n" - "fmax v3.4s, v3.4s, v20.4s\n" - "add x24, x24, #4\n" - "fmax v2.4s, v2.4s, v20.4s\n" - "ldr s23, [x24]\n" - "str s3, [%[outptr0]]\n" - "fmla v0.4s, v14.4s, v8.4s\n" - "str s2, [%[outptr0], %[output_col_stride1]]\n" - "fmax v1.4s, v1.4s, v20.4s\n" - "mov v3.16b, v16.16b\n" - "ldr s12, [%[wbptr], #12]\n" - "str s1, [x19]\n" - "fmla v0.4s, v17.4s, v6.4s\n" - "mov v1.16b, v16.16b\n" - "ldr s8, [%[wbptr], #32]\n" - "mov v2.16b, v16.16b\n" - "ldr s13, [x24, %[input_col_stride1]]\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "ldr s6, [%[wbptr], #24]\n" - "fmla v3.4s, v21.4s, v4.4s\n" - "add x25, x25, #4\n" - "ldr s14, [x25]\n" - "add x28, x28, #4\n" - "fmax v0.4s, v0.4s, v20.4s\n" - "ldr s7, [%[wbptr], #36]\n" - "fmla v3.4s, v23.4s, v11.4s\n" - "ldr s15, [x28]\n" - "str s0, [x19, %[output_col_stride1]]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "mov v0.16b, v16.16b\n" - "ldr s22, [x25, %[input_col_stride1]]\n" - "fmla v3.4s, v19.4s, v5.4s\n" - "add x26, x26, #4\n" - "add 
%[outptr0], %[outptr0], #4\n" - "add x19, x19, #4\n" - "subs x20, x20, #1\n" - "fmla v3.4s, v14.4s, v9.4s\n" - "bne 5b\n" - "6:\n" - "fmla v3.4s, v13.4s, v10.4s\n" - "ldr s17, [x24, x27]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr s20, [%[inptr0], x23]\n" - "fmla v1.4s, v15.4s, v11.4s\n" - "ldr s19, [x26]\n" - "fmla v3.4s, v18.4s, v12.4s\n" - "ldr s13, [x28, %[input_col_stride1]]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr s14, [x25, x27]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "ldr s15, [x24, x23]\n" - "fmla v3.4s, v22.4s, v8.4s\n" - "ldr s16, [%[inptr0], x22]\n" - "fmla v2.4s, v20.4s, v5.4s\n" - "ldr s20, [x26, %[input_col_stride1]]\n" - "fmla v1.4s, v19.4s, v9.4s\n" - "ldr s19, [x28, x27]\n" - "fmla v3.4s, v17.4s, v6.4s\n" - "ldr s21, [x25, x23]\n" - "fmla v2.4s, v14.4s, v9.4s\n" - "ldr s22, [x24, x22]\n" - "fmla v1.4s, v13.4s, v10.4s\n" - "ldr s23, [x26, x27]\n" - "fmla v3.4s, v14.4s, v7.4s\n" - "ldr s18, [x28, x23]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "ldr s13, [x25, x22]\n" - "fmla v1.4s, v14.4s, v12.4s\n" - "ldr s14, [x26, x23]\n" - "fmla v2.4s, v15.4s, v10.4s\n" - "ldr s17, [x28, x22]\n" - "fmla v0.4s, v19.4s, v11.4s\n" - "ldr s15, [x26, x22]\n" - "fmla v1.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v21.4s, v5.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v1.4s, v19.4s, v6.4s\n" - "add x24, x24, #4\n" - "fmla v2.4s, v21.4s, v8.4s\n" - "add x25, x25, #4\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "add x28, x28, #4\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "add x26, x26, #4\n" - "fmla v2.4s, v22.4s, v6.4s\n" - "movi v20.16b, #0\n" - "fmla v0.4s, v18.4s, v10.4s\n" - "fmax v3.4s, v3.4s, v20.4s\n" - "fmla v2.4s, v13.4s, v7.4s\n" - "fmax v1.4s, v1.4s, v20.4s\n" - "str s3, [%[outptr0]]\n" - "fmla v0.4s, v13.4s, v12.4s\n" - "str s1, [x19]\n" - "fmax v2.4s, v2.4s, v20.4s\n" - "fmla v0.4s, v14.4s, v8.4s\n" - "str s2, [%[outptr0], %[output_col_stride1]]\n" - "add %[outptr0], %[outptr0], 
#4\n" - "fmla v0.4s, v17.4s, v6.4s\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "fmax v0.4s, v0.4s, v20.4s\n" - "str s0, [x19, %[output_col_stride1]]\n" - "add x19, x19, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input) - : [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU>( - int n_channels, - const void *weight_bias_ptr, - const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float *outptrs[Base::output_tile_rows][Base::output_tile_cols] -) -{ - __asm __volatile( - "mov x22, xzr\n" - "mov x26, xzr\n" - "and x23, %[n_channels], #3\n" - "lsr x24, %[n_channels], #2\n" - "cbz x24, 4f\n" - "1:\n" - "ldr q14, [%[wbptr]]\n" - "ldr x19, [%[inptrs], 0]\n" - "mov v3.16b, v14.16b\n" - "ldr q13, [%[wbptr], #16]\n" - "mov v1.16b, v14.16b\n" - "ldr q11, [%[wbptr], #32]\n" - "mov v2.16b, v14.16b\n" - "ldr q4, [%[wbptr], #48]\n" - "mov v0.16b, v14.16b\n" - "ldr q12, [%[wbptr], #64]\n" - "ldr q9, [%[wbptr], #80]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr q8, [%[wbptr], #96]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr q7, [%[wbptr], #112]\n" - "ldr x25, [%[inptrs], 120]\n" - "ldr q6, [%[wbptr], #128]\n" - "subs x24, x24, #1\n" - "ldr q5, [%[wbptr], #144]\n" - "ldr q15, [x19, x22]\n" - "fmla v3.4s, v15.4s, v13.4s\n" - "ldr q17, [x20, x22]\n" - "ldr q16, [x21, x22]\n" - "ldr x19, [%[inptrs], 8]\n" - "ldr q15, [x25, x22]\n" - "ldr x20, [%[inptrs], 48]\n" - "ldr q10, [x19, x22]\n" - 
"ldr x21, [%[inptrs], 88]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr q17, [x20, x22]\n" - "ldr q14, [x21, x22]\n" - "ldr x19, [%[inptrs], 16]\n" - "ldr q18, [x19, x22]\n" - "fmla v3.4s, v10.4s, v11.4s\n" - "fmla v3.4s, v16.4s, v7.4s\n" - "beq 3f\n" - "2:\n" - "fmla v1.4s, v16.4s, v13.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v3.4s, v17.4s, v9.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v2.4s, v18.4s, v13.4s\n" - "ldr q16, [x20, x22]\n" - "movi v10.16b, #0\n" - "ldr q17, [x19, x22]\n" - "fmla v1.4s, v15.4s, v12.4s\n" - "ldr x27, [%[inptrs], 160]\n" - "fmla v3.4s, v18.4s, v4.4s\n" - "ldr x25, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "ldr q18, [x27, x22]\n" - "ldr q15, [x25, x22]\n" - "ldr x21, [%[inptrs], 96]\n" - "fmla v1.4s, v14.4s, v11.4s\n" - "ldr x20, [%[inptrs], 64]\n" - "fmla v3.4s, v14.4s, v6.4s\n" - "ldr q14, [x21, x22]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr q17, [x20, x22]\n" - "fmla v0.4s, v14.4s, v13.4s\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v1.4s, v18.4s, v7.4s\n" - "ldr x27, [%[inptrs], 168]\n" - "fmla v3.4s, v16.4s, v8.4s\n" - "ldr q18, [x19, x22]\n" - "fmla v2.4s, v14.4s, v7.4s\n" - "ldr q13, [x27, x22]\n" - "ldr x25, [%[inptrs], 136]\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v1.4s, v15.4s, v9.4s\n" - "ldr x27, [%[inptrs], 176]\n" - "fmla v3.4s, v14.4s, v5.4s\n" - "ldr q16, [x25, x22]\n" - "fmla v2.4s, v17.4s, v9.4s\n" - "ldr q17, [x21, x22]\n" - "fmla v0.4s, v16.4s, v12.4s\n" - "ldr q12, [x20, x22]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "ldr q15, [x27, x22]\n" - "fmax v3.4s, v3.4s, v10.4s\n" - "ldr x25, [%[inptrs], 144]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v0.4s, v17.4s, v11.4s\n" - "ldr q14, [x25, x22]\n" - "fmla v1.4s, v13.4s, v6.4s\n" - "ldr q11, [x21, x22]\n" - "ldr x27, [%[inptrs], 184]\n" - "ldr x25, [%[inptrs], 152]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str q3, [x21, x26]\n" - "fmla v0.4s, 
v15.4s, v7.4s\n" - "fmla v1.4s, v16.4s, v8.4s\n" - "ldr q18, [x27, x22]\n" - "ldr q17, [x25, x22]\n" - "ldr x27, [%[inptrs], 192]\n" - "fmla v2.4s, v12.4s, v8.4s\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v0.4s, v14.4s, v9.4s\n" - "ldr q16, [x27, x22]\n" - "fmla v1.4s, v15.4s, v5.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "ldr q14, [%[wbptr]]\n" - "add x22, x22, #16\n" - "fmla v2.4s, v11.4s, v5.4s\n" - "ldr q13, [%[wbptr], #16]\n" - "fmla v0.4s, v11.4s, v4.4s\n" - "ldr q11, [%[wbptr], #32]\n" - "fmax v1.4s, v1.4s, v10.4s\n" - "ldr q12, [%[wbptr], #64]\n" - "mov v3.16b, v14.16b\n" - "ldr q9, [%[wbptr], #80]\n" - "fmax v2.4s, v2.4s, v10.4s\n" - "ldr q7, [%[wbptr], #112]\n" - "str q1, [x28, x26]\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "mov v1.16b, v14.16b\n" - "ldr q4, [%[wbptr], #48]\n" - "str q2, [x21, x26]\n" - "ldr x28, [%[outptrs], 24]\n" - "mov v2.16b, v14.16b\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v17.4s, v8.4s\n" - "ldr q6, [%[wbptr], #128]\n" - "ldr x19, [%[inptrs], 0]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr x25, [%[inptrs], 120]\n" - "subs x24, x24, #1\n" - "ldr q15, [x19, x22]\n" - "fmla v0.4s, v16.4s, v5.4s\n" - "ldr q8, [%[wbptr], #96]\n" - "fmla v3.4s, v15.4s, v13.4s\n" - "ldr q17, [x20, x22]\n" - "ldr q16, [x21, x22]\n" - "ldr x19, [%[inptrs], 8]\n" - "ldr q15, [x25, x22]\n" - "ldr x20, [%[inptrs], 48]\n" - "fmax v0.4s, v0.4s, v10.4s\n" - "ldr q5, [%[wbptr], #144]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr q10, [x19, x22]\n" - "ldr q17, [x20, x22]\n" - "ldr x19, [%[inptrs], 16]\n" - "str q0, [x28, x26]\n" - "ldr x21, [%[inptrs], 88]\n" - "mov v0.16b, v14.16b\n" - "ldr q18, [x19, x22]\n" - "fmla v3.4s, v10.4s, v11.4s\n" - "ldr q14, [x21, x22]\n" - "add x26, x26, #16\n" - "fmla v3.4s, v16.4s, v7.4s\n" - "bne 2b\n" - "3:\n" - "fmla v1.4s, v16.4s, v13.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v3.4s, v17.4s, v9.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v2.4s, v18.4s, v13.4s\n" - "ldr q16, [x20, x22]\n" - 
"movi v10.16b, #0\n" - "ldr q17, [x19, x22]\n" - "fmla v1.4s, v15.4s, v12.4s\n" - "ldr x27, [%[inptrs], 160]\n" - "fmla v3.4s, v18.4s, v4.4s\n" - "ldr x25, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "ldr q18, [x27, x22]\n" - "ldr q15, [x25, x22]\n" - "ldr x21, [%[inptrs], 96]\n" - "fmla v1.4s, v14.4s, v11.4s\n" - "ldr x20, [%[inptrs], 64]\n" - "fmla v3.4s, v14.4s, v6.4s\n" - "ldr q14, [x21, x22]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr q17, [x20, x22]\n" - "fmla v0.4s, v14.4s, v13.4s\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v1.4s, v18.4s, v7.4s\n" - "ldr x27, [%[inptrs], 168]\n" - "fmla v3.4s, v16.4s, v8.4s\n" - "ldr q18, [x19, x22]\n" - "fmla v2.4s, v14.4s, v7.4s\n" - "ldr q13, [x27, x22]\n" - "ldr x25, [%[inptrs], 136]\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v1.4s, v15.4s, v9.4s\n" - "ldr x27, [%[inptrs], 176]\n" - "fmla v3.4s, v14.4s, v5.4s\n" - "ldr q16, [x25, x22]\n" - "fmla v2.4s, v17.4s, v9.4s\n" - "ldr q17, [x21, x22]\n" - "fmla v0.4s, v16.4s, v12.4s\n" - "ldr q12, [x20, x22]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "ldr q15, [x27, x22]\n" - "fmax v3.4s, v3.4s, v10.4s\n" - "ldr x25, [%[inptrs], 144]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v0.4s, v17.4s, v11.4s\n" - "ldr q14, [x25, x22]\n" - "fmla v1.4s, v13.4s, v6.4s\n" - "ldr q11, [x21, x22]\n" - "ldr x27, [%[inptrs], 184]\n" - "ldr x25, [%[inptrs], 152]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str q3, [x21, x26]\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "fmla v1.4s, v16.4s, v8.4s\n" - "ldr q18, [x27, x22]\n" - "ldr q17, [x25, x22]\n" - "ldr x27, [%[inptrs], 192]\n" - "fmla v2.4s, v12.4s, v8.4s\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v0.4s, v14.4s, v9.4s\n" - "ldr q16, [x27, x22]\n" - "fmla v1.4s, v15.4s, v5.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "add x22, x22, #16\n" - "fmla v2.4s, v11.4s, v5.4s\n" - "fmla v0.4s, v11.4s, v4.4s\n" 
- "fmax v1.4s, v1.4s, v10.4s\n" - "fmax v2.4s, v2.4s, v10.4s\n" - "str q1, [x28, x26]\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "ldr x28, [%[outptrs], 24]\n" - "str q2, [x21, x26]\n" - "fmla v0.4s, v17.4s, v8.4s\n" - "fmla v0.4s, v16.4s, v5.4s\n" - "fmax v0.4s, v0.4s, v10.4s\n" - "str q0, [x28, x26]\n" - "add x26, x26, #16\n" - "4:\n" - "cbz x23, 7f\n" - "ldr s14, [%[wbptr]]\n" - "mov v3.16b, v14.16b\n" - "ldr s13, [%[wbptr], #4]\n" - "mov v1.16b, v14.16b\n" - "ldr s11, [%[wbptr], #8]\n" - "mov v2.16b, v14.16b\n" - "ldr s4, [%[wbptr], #12]\n" - "mov v0.16b, v14.16b\n" - "ldr s12, [%[wbptr], #16]\n" - "ldr s9, [%[wbptr], #20]\n" - "ldr x19, [%[inptrs], 0]\n" - "ldr s8, [%[wbptr], #24]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr s7, [%[wbptr], #28]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr s6, [%[wbptr], #32]\n" - "ldr x25, [%[inptrs], 120]\n" - "ldr s5, [%[wbptr], #36]\n" - "subs x23, x23, #1\n" - "ldr s15, [x19, x22]\n" - "ldr s17, [x20, x22]\n" - "fmla v3.4s, v15.4s, v13.4s\n" - "ldr s16, [x21, x22]\n" - "ldr s15, [x25, x22]\n" - "ldr x19, [%[inptrs], 8]\n" - "ldr x20, [%[inptrs], 48]\n" - "ldr x21, [%[inptrs], 88]\n" - "ldr s10, [x19, x22]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr s17, [x20, x22]\n" - "ldr s14, [x21, x22]\n" - "ldr x19, [%[inptrs], 16]\n" - "ldr s18, [x19, x22]\n" - "fmla v3.4s, v10.4s, v11.4s\n" - "fmla v3.4s, v16.4s, v7.4s\n" - "beq 6f\n" - "5:\n" - "fmla v1.4s, v16.4s, v13.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v3.4s, v17.4s, v9.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v2.4s, v18.4s, v13.4s\n" - "ldr s16, [x20, x22]\n" - "movi v10.16b, #0\n" - "ldr s17, [x19, x22]\n" - "fmla v1.4s, v15.4s, v12.4s\n" - "ldr x27, [%[inptrs], 160]\n" - "fmla v3.4s, v18.4s, v4.4s\n" - "ldr x25, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "ldr s18, [x27, x22]\n" - "ldr s15, [x25, x22]\n" - "ldr x21, [%[inptrs], 96]\n" - "fmla v1.4s, v14.4s, v11.4s\n" - "ldr x20, [%[inptrs], 64]\n" - "fmla v3.4s, v14.4s, v6.4s\n" - "ldr s14, [x21, x22]\n" - "fmla v2.4s, 
v17.4s, v11.4s\n" - "ldr s17, [x20, x22]\n" - "fmla v0.4s, v14.4s, v13.4s\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v1.4s, v18.4s, v7.4s\n" - "ldr x27, [%[inptrs], 168]\n" - "fmla v3.4s, v16.4s, v8.4s\n" - "ldr s18, [x19, x22]\n" - "fmla v2.4s, v14.4s, v7.4s\n" - "ldr s13, [x27, x22]\n" - "ldr x25, [%[inptrs], 136]\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v1.4s, v15.4s, v9.4s\n" - "ldr x27, [%[inptrs], 176]\n" - "fmla v3.4s, v14.4s, v5.4s\n" - "ldr s16, [x25, x22]\n" - "fmla v2.4s, v17.4s, v9.4s\n" - "ldr s17, [x21, x22]\n" - "fmla v0.4s, v16.4s, v12.4s\n" - "ldr s12, [x20, x22]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "ldr s15, [x27, x22]\n" - "fmax v3.4s, v3.4s, v10.4s\n" - "ldr x25, [%[inptrs], 144]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v0.4s, v17.4s, v11.4s\n" - "ldr s14, [x25, x22]\n" - "fmla v1.4s, v13.4s, v6.4s\n" - "ldr s11, [x21, x22]\n" - "ldr x27, [%[inptrs], 184]\n" - "ldr x25, [%[inptrs], 152]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str s3, [x21, x26]\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "fmla v1.4s, v16.4s, v8.4s\n" - "ldr s18, [x27, x22]\n" - "ldr s17, [x25, x22]\n" - "ldr x27, [%[inptrs], 192]\n" - "fmla v2.4s, v12.4s, v8.4s\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v0.4s, v14.4s, v9.4s\n" - "ldr s16, [x27, x22]\n" - "fmla v1.4s, v15.4s, v5.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "ldr s14, [%[wbptr]]\n" - "add x22, x22, #4\n" - "fmla v2.4s, v11.4s, v5.4s\n" - "ldr s13, [%[wbptr], #4]\n" - "fmla v0.4s, v11.4s, v4.4s\n" - "ldr s11, [%[wbptr], #8]\n" - "fmax v1.4s, v1.4s, v10.4s\n" - "ldr s12, [%[wbptr], #16]\n" - "mov v3.16b, v14.16b\n" - "ldr s9, [%[wbptr], #20]\n" - "fmax v2.4s, v2.4s, v10.4s\n" - "ldr s7, [%[wbptr], #28]\n" - "str s1, [x28, x26]\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "mov v1.16b, v14.16b\n" - "ldr s4, [%[wbptr], #12]\n" - "str s2, [x21, x26]\n" - "ldr x28, [%[outptrs], 24]\n" - "mov v2.16b, v14.16b\n" - 
"prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v0.4s, v17.4s, v8.4s\n" - "ldr s6, [%[wbptr], #32]\n" - "ldr x19, [%[inptrs], 0]\n" - "ldr x20, [%[inptrs], 40]\n" - "ldr x21, [%[inptrs], 80]\n" - "ldr x25, [%[inptrs], 120]\n" - "subs x23, x23, #1\n" - "ldr s15, [x19, x22]\n" - "fmla v0.4s, v16.4s, v5.4s\n" - "ldr s8, [%[wbptr], #24]\n" - "fmla v3.4s, v15.4s, v13.4s\n" - "ldr s17, [x20, x22]\n" - "ldr s16, [x21, x22]\n" - "ldr x19, [%[inptrs], 8]\n" - "ldr s15, [x25, x22]\n" - "ldr x20, [%[inptrs], 48]\n" - "fmax v0.4s, v0.4s, v10.4s\n" - "ldr s5, [%[wbptr], #36]\n" - "fmla v3.4s, v17.4s, v12.4s\n" - "ldr s10, [x19, x22]\n" - "ldr s17, [x20, x22]\n" - "ldr x19, [%[inptrs], 16]\n" - "str s0, [x28, x26]\n" - "ldr x21, [%[inptrs], 88]\n" - "mov v0.16b, v14.16b\n" - "ldr s18, [x19, x22]\n" - "fmla v3.4s, v10.4s, v11.4s\n" - "ldr s14, [x21, x22]\n" - "add x26, x26, #4\n" - "fmla v3.4s, v16.4s, v7.4s\n" - "bne 5b\n" - "6:\n" - "fmla v1.4s, v16.4s, v13.4s\n" - "ldr x20, [%[inptrs], 56]\n" - "fmla v3.4s, v17.4s, v9.4s\n" - "ldr x19, [%[inptrs], 24]\n" - "fmla v2.4s, v18.4s, v13.4s\n" - "ldr s16, [x20, x22]\n" - "movi v10.16b, #0\n" - "ldr s17, [x19, x22]\n" - "fmla v1.4s, v15.4s, v12.4s\n" - "ldr x27, [%[inptrs], 160]\n" - "fmla v3.4s, v18.4s, v4.4s\n" - "ldr x25, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v12.4s\n" - "ldr s18, [x27, x22]\n" - "ldr s15, [x25, x22]\n" - "ldr x21, [%[inptrs], 96]\n" - "fmla v1.4s, v14.4s, v11.4s\n" - "ldr x20, [%[inptrs], 64]\n" - "fmla v3.4s, v14.4s, v6.4s\n" - "ldr s14, [x21, x22]\n" - "fmla v2.4s, v17.4s, v11.4s\n" - "ldr s17, [x20, x22]\n" - "fmla v0.4s, v14.4s, v13.4s\n" - "ldr x19, [%[inptrs], 32]\n" - "fmla v1.4s, v18.4s, v7.4s\n" - "ldr x27, [%[inptrs], 168]\n" - "fmla v3.4s, v16.4s, v8.4s\n" - "ldr s18, [x19, x22]\n" - "fmla v2.4s, v14.4s, v7.4s\n" - "ldr s13, [x27, x22]\n" - "ldr x25, [%[inptrs], 136]\n" - "ldr x21, [%[inptrs], 104]\n" - "ldr x20, [%[inptrs], 72]\n" - "fmla v1.4s, v15.4s, v9.4s\n" - "ldr x27, [%[inptrs], 176]\n" - "fmla 
v3.4s, v14.4s, v5.4s\n" - "ldr s16, [x25, x22]\n" - "fmla v2.4s, v17.4s, v9.4s\n" - "ldr s17, [x21, x22]\n" - "fmla v0.4s, v16.4s, v12.4s\n" - "ldr s12, [x20, x22]\n" - "fmla v1.4s, v14.4s, v4.4s\n" - "ldr s15, [x27, x22]\n" - "fmax v3.4s, v3.4s, v10.4s\n" - "ldr x25, [%[inptrs], 144]\n" - "fmla v2.4s, v18.4s, v4.4s\n" - "ldr x21, [%[inptrs], 112]\n" - "fmla v0.4s, v17.4s, v11.4s\n" - "ldr s14, [x25, x22]\n" - "fmla v1.4s, v13.4s, v6.4s\n" - "ldr s11, [x21, x22]\n" - "ldr x27, [%[inptrs], 184]\n" - "ldr x25, [%[inptrs], 152]\n" - "ldr x21, [%[outptrs], 0]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr x28, [%[outptrs], 16]\n" - "str s3, [x21, x26]\n" - "fmla v0.4s, v15.4s, v7.4s\n" - "fmla v1.4s, v16.4s, v8.4s\n" - "ldr s18, [x27, x22]\n" - "ldr s17, [x25, x22]\n" - "ldr x27, [%[inptrs], 192]\n" - "fmla v2.4s, v12.4s, v8.4s\n" - "ldr x21, [%[outptrs], 8]\n" - "fmla v0.4s, v14.4s, v9.4s\n" - "ldr s16, [x27, x22]\n" - "fmla v1.4s, v15.4s, v5.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "add x22, x22, #4\n" - "fmla v2.4s, v11.4s, v5.4s\n" - "fmla v0.4s, v11.4s, v4.4s\n" - "fmax v1.4s, v1.4s, v10.4s\n" - "fmax v2.4s, v2.4s, v10.4s\n" - "str s1, [x28, x26]\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "ldr x28, [%[outptrs], 24]\n" - "str s2, [x21, x26]\n" - "fmla v0.4s, v17.4s, v8.4s\n" - "fmla v0.4s, v16.4s, v5.4s\n" - "fmax v0.4s, v0.4s, v10.4s\n" - "str s0, [x28, x26]\n" - "add x26, x26, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr) - : [inptrs] "r" (inptrs), [n_channels] "r" ((long) n_channels), [outptrs] "r" (outptrs) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU6>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned 
int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x21, %[inptr0], %[input_row_stride]\n" - "add x23, %[input_col_stride1], %[input_col_stride1]\n" - "add x24, %[outptr0], %[output_row_stride]\n" - "add x28, x21, %[input_row_stride]\n" - "add x26, x23, %[input_col_stride1]\n" - "and x19, %[n_channels], #3\n" - "add x27, x28, %[input_row_stride]\n" - "add x25, x26, %[input_col_stride1]\n" - "lsr x20, %[n_channels], #2\n" - "add x22, x27, %[input_row_stride]\n" - "cbz x20, 4f\n" - "1:\n" - "ldr q14, [%[wbptr]]\n" - "subs x20, x20, #1\n" - "mov v5.16b, v14.16b\n" - "ldr q0, [%[wbptr], #16]\n" - "mov v11.16b, v14.16b\n" - "ldr q1, [%[wbptr], #32]\n" - "mov v12.16b, v14.16b\n" - "ldr q2, [%[wbptr], #48]\n" - "mov v10.16b, v14.16b\n" - "ldr q6, [%[wbptr], #64]\n" - "ldr q3, [%[wbptr], #80]\n" - "ldr q7, [%[wbptr], #96]\n" - "ldr q4, [%[wbptr], #112]\n" - "ldr q8, [%[wbptr], #128]\n" - "ldr q9, [%[wbptr], #144]\n" - "ldr q19, [%[inptr0]]\n" - "fmla v5.4s, v19.4s, v0.4s\n" - "ldr q15, [x21]\n" - "ldr q21, [%[inptr0], %[input_col_stride1]]\n" - "ldr q16, [x28]\n" - "fmla v11.4s, v16.4s, v0.4s\n" - "ldr q23, [x21, %[input_col_stride1]]\n" - "fmla v5.4s, v15.4s, v6.4s\n" - "ldr q18, [%[inptr0], x23]\n" - "ldr q17, [x27]\n" - "ldr q13, [x28, %[input_col_stride1]]\n" - "fmla v5.4s, v21.4s, v1.4s\n" - "fmla v5.4s, v16.4s, v4.4s\n" - "beq 3f\n" - "2:\n" - "fmla v5.4s, v23.4s, v3.4s\n" - "ldr q21, [x21, x23]\n" - "fmla v12.4s, v18.4s, v0.4s\n" - "ldr q20, [%[inptr0], x26]\n" - "fmla v11.4s, v17.4s, v6.4s\n" - "ldr q19, [x22]\n" - "fmla v5.4s, v18.4s, v2.4s\n" - "ldr q15, [x27, %[input_col_stride1]]\n" - "fmla v12.4s, v21.4s, v6.4s\n" - "ldr q16, [x28, x23]\n" - "fmla v11.4s, v13.4s, v1.4s\n" - "ldr q17, [x21, x26]\n" - "fmla v5.4s, v13.4s, v8.4s\n" - "ldr q14, [%[inptr0], x25]\n" - "fmla v12.4s, v20.4s, v1.4s\n" - "ldr q20, [x22, %[input_col_stride1]]\n" - "fmla v11.4s, v19.4s, 
v4.4s\n" - "ldr q19, [x27, x23]\n" - "fmla v5.4s, v21.4s, v7.4s\n" - "ldr q22, [x28, x26]\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "ldr q21, [x21, x25]\n" - "fmla v11.4s, v15.4s, v3.4s\n" - "ldr q23, [x22, x23]\n" - "fmla v5.4s, v16.4s, v9.4s\n" - "ldr q18, [x27, x26]\n" - "fmla v10.4s, v16.4s, v0.4s\n" - "ldr q15, [x28, x25]\n" - "fmla v11.4s, v16.4s, v2.4s\n" - "ldr q16, [x22, x26]\n" - "fmla v12.4s, v17.4s, v3.4s\n" - "ldr q17, [x27, x25]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "ldr q13, [x22, x25]\n" - "fmla v11.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v12.4s, v14.4s, v2.4s\n" - "ldr q14, [%[wbptr]]\n" - "fmla v10.4s, v22.4s, v1.4s\n" - "ldr q0, [%[wbptr], #16]\n" - "fmla v11.4s, v19.4s, v7.4s\n" - "ldr q6, [%[wbptr], #64]\n" - "fmla v12.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v10.4s, v23.4s, v4.4s\n" - "ldr q1, [%[wbptr], #32]\n" - "fmla v11.4s, v23.4s, v9.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v12.4s, v21.4s, v7.4s\n" - "ldr q19, [%[inptr0]]\n" - "fmla v10.4s, v18.4s, v3.4s\n" - "ldr q4, [%[wbptr], #112]\n" - "movi v20.16b, #0\n" - "ldr q21, [%[inptr0], %[input_col_stride1]]\n" - "fmla v12.4s, v15.4s, v9.4s\n" - "ldr q18, [%[inptr0], x23]\n" - "fmla v10.4s, v15.4s, v2.4s\n" - "ldr q3, [%[wbptr], #80]\n" - "fmov v22.4s, #6.0\n" - "add x21, x21, #16\n" - "fmax v5.4s, v5.4s, v20.4s\n" - "ldr q15, [x21]\n" - "fmla v10.4s, v16.4s, v8.4s\n" - "ldr q2, [%[wbptr], #48]\n" - "fmin v5.4s, v5.4s, v22.4s\n" - "ldr q23, [x21, %[input_col_stride1]]\n" - "fmax v12.4s, v12.4s, v20.4s\n" - "add x28, x28, #16\n" - "str q5, [%[outptr0]]\n" - "fmla v10.4s, v17.4s, v7.4s\n" - "fmin v12.4s, v12.4s, v22.4s\n" - "ldr q8, [%[wbptr], #128]\n" - "fmax v11.4s, v11.4s, v20.4s\n" - "ldr q16, [x28]\n" - "str q12, [%[outptr0], %[output_col_stride1]]\n" - "fmla v10.4s, v13.4s, v9.4s\n" - "fmin v11.4s, v11.4s, v22.4s\n" - "ldr q7, [%[wbptr], #96]\n" - "mov v5.16b, v14.16b\n" - "ldr q13, [x28, %[input_col_stride1]]\n" - "str q11, [x24]\n" 
- "fmax v10.4s, v10.4s, v20.4s\n" - "mov v11.16b, v14.16b\n" - "ldr q9, [%[wbptr], #144]\n" - "fmin v10.4s, v10.4s, v22.4s\n" - "add x27, x27, #16\n" - "mov v12.16b, v14.16b\n" - "ldr q17, [x27]\n" - "str q10, [x24, %[output_col_stride1]]\n" - "fmla v5.4s, v19.4s, v0.4s\n" - "mov v10.16b, v14.16b\n" - "add x22, x22, #16\n" - "fmla v11.4s, v16.4s, v0.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v5.4s, v15.4s, v6.4s\n" - "add x24, x24, #16\n" - "subs x20, x20, #1\n" - "fmla v5.4s, v21.4s, v1.4s\n" - "fmla v5.4s, v16.4s, v4.4s\n" - "bne 2b\n" - "3:\n" - "fmla v5.4s, v23.4s, v3.4s\n" - "ldr q21, [x21, x23]\n" - "fmla v12.4s, v18.4s, v0.4s\n" - "ldr q20, [%[inptr0], x26]\n" - "fmla v11.4s, v17.4s, v6.4s\n" - "ldr q19, [x22]\n" - "fmla v5.4s, v18.4s, v2.4s\n" - "ldr q15, [x27, %[input_col_stride1]]\n" - "fmla v12.4s, v21.4s, v6.4s\n" - "ldr q16, [x28, x23]\n" - "fmla v11.4s, v13.4s, v1.4s\n" - "ldr q17, [x21, x26]\n" - "fmla v5.4s, v13.4s, v8.4s\n" - "ldr q14, [%[inptr0], x25]\n" - "fmla v12.4s, v20.4s, v1.4s\n" - "ldr q20, [x22, %[input_col_stride1]]\n" - "fmla v11.4s, v19.4s, v4.4s\n" - "ldr q19, [x27, x23]\n" - "fmla v5.4s, v21.4s, v7.4s\n" - "ldr q22, [x28, x26]\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "ldr q21, [x21, x25]\n" - "fmla v11.4s, v15.4s, v3.4s\n" - "ldr q23, [x22, x23]\n" - "fmla v5.4s, v16.4s, v9.4s\n" - "ldr q18, [x27, x26]\n" - "fmla v10.4s, v16.4s, v0.4s\n" - "ldr q15, [x28, x25]\n" - "fmla v11.4s, v16.4s, v2.4s\n" - "ldr q16, [x22, x26]\n" - "fmla v12.4s, v17.4s, v3.4s\n" - "ldr q17, [x27, x25]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "ldr q13, [x22, x25]\n" - "fmla v11.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v12.4s, v14.4s, v2.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v10.4s, v22.4s, v1.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v11.4s, v19.4s, v7.4s\n" - "add x21, x21, #16\n" - "fmla v12.4s, v22.4s, v8.4s\n" - "add x28, x28, #16\n" - "fmla v10.4s, v23.4s, v4.4s\n" - "add x27, x27, #16\n" - "fmla v11.4s, 
v23.4s, v9.4s\n" - "add x22, x22, #16\n" - "fmla v12.4s, v21.4s, v7.4s\n" - "movi v20.16b, #0\n" - "fmla v10.4s, v18.4s, v3.4s\n" - "fmov v22.4s, #6.0\n" - "fmax v5.4s, v5.4s, v20.4s\n" - "fmax v11.4s, v11.4s, v20.4s\n" - "fmla v12.4s, v15.4s, v9.4s\n" - "fmla v10.4s, v15.4s, v2.4s\n" - "fmin v5.4s, v5.4s, v22.4s\n" - "fmin v11.4s, v11.4s, v22.4s\n" - "fmax v12.4s, v12.4s, v20.4s\n" - "str q5, [%[outptr0]]\n" - "str q11, [x24]\n" - "fmla v10.4s, v16.4s, v8.4s\n" - "fmin v12.4s, v12.4s, v22.4s\n" - "str q12, [%[outptr0], %[output_col_stride1]]\n" - "fmla v10.4s, v17.4s, v7.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v10.4s, v13.4s, v9.4s\n" - "fmax v10.4s, v10.4s, v20.4s\n" - "fmin v10.4s, v10.4s, v22.4s\n" - "str q10, [x24, %[output_col_stride1]]\n" - "add x24, x24, #16\n" - "4:\n" - "cbz x19, 7f\n" - "ldr s14, [%[wbptr]]\n" - "mov v5.16b, v14.16b\n" - "ldr s0, [%[wbptr], #4]\n" - "mov v11.16b, v14.16b\n" - "ldr s1, [%[wbptr], #8]\n" - "mov v12.16b, v14.16b\n" - "ldr s2, [%[wbptr], #12]\n" - "mov v10.16b, v14.16b\n" - "ldr s6, [%[wbptr], #16]\n" - "ldr s3, [%[wbptr], #20]\n" - "subs x19, x19, #1\n" - "ldr s7, [%[wbptr], #24]\n" - "ldr s4, [%[wbptr], #28]\n" - "ldr s8, [%[wbptr], #32]\n" - "ldr s9, [%[wbptr], #36]\n" - "ldr s19, [%[inptr0]]\n" - "ldr s15, [x21]\n" - "fmla v5.4s, v19.4s, v0.4s\n" - "ldr s21, [%[inptr0], %[input_col_stride1]]\n" - "ldr s16, [x28]\n" - "ldr s23, [x21, %[input_col_stride1]]\n" - "fmla v11.4s, v16.4s, v0.4s\n" - "ldr s18, [%[inptr0], x23]\n" - "fmla v5.4s, v15.4s, v6.4s\n" - "ldr s17, [x27]\n" - "ldr s13, [x28, %[input_col_stride1]]\n" - "fmla v5.4s, v21.4s, v1.4s\n" - "fmla v5.4s, v16.4s, v4.4s\n" - "beq 6f\n" - "5:\n" - "fmla v5.4s, v23.4s, v3.4s\n" - "ldr s21, [x21, x23]\n" - "fmla v12.4s, v18.4s, v0.4s\n" - "ldr s20, [%[inptr0], x26]\n" - "fmla v11.4s, v17.4s, v6.4s\n" - "ldr s19, [x22]\n" - "fmla v5.4s, v18.4s, v2.4s\n" - "ldr s15, [x27, %[input_col_stride1]]\n" - "fmla v12.4s, v21.4s, v6.4s\n" - "ldr s16, [x28, x23]\n" - 
"fmla v11.4s, v13.4s, v1.4s\n" - "ldr s17, [x21, x26]\n" - "fmla v5.4s, v13.4s, v8.4s\n" - "ldr s14, [%[inptr0], x25]\n" - "fmla v12.4s, v20.4s, v1.4s\n" - "ldr s20, [x22, %[input_col_stride1]]\n" - "fmla v11.4s, v19.4s, v4.4s\n" - "ldr s19, [x27, x23]\n" - "fmla v5.4s, v21.4s, v7.4s\n" - "ldr s22, [x28, x26]\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "ldr s21, [x21, x25]\n" - "fmla v11.4s, v15.4s, v3.4s\n" - "ldr s23, [x22, x23]\n" - "fmla v5.4s, v16.4s, v9.4s\n" - "ldr s18, [x27, x26]\n" - "fmla v10.4s, v16.4s, v0.4s\n" - "ldr s15, [x28, x25]\n" - "fmla v11.4s, v16.4s, v2.4s\n" - "ldr s16, [x22, x26]\n" - "fmla v12.4s, v17.4s, v3.4s\n" - "ldr s17, [x27, x25]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "ldr s13, [x22, x25]\n" - "fmla v11.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v12.4s, v14.4s, v2.4s\n" - "ldr s14, [%[wbptr]]\n" - "fmla v10.4s, v22.4s, v1.4s\n" - "ldr s0, [%[wbptr], #4]\n" - "fmla v11.4s, v19.4s, v7.4s\n" - "ldr s6, [%[wbptr], #16]\n" - "fmla v12.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v10.4s, v23.4s, v4.4s\n" - "ldr s1, [%[wbptr], #8]\n" - "fmla v11.4s, v23.4s, v9.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v12.4s, v21.4s, v7.4s\n" - "ldr s19, [%[inptr0]]\n" - "fmla v10.4s, v18.4s, v3.4s\n" - "ldr s4, [%[wbptr], #28]\n" - "movi v20.16b, #0\n" - "ldr s21, [%[inptr0], %[input_col_stride1]]\n" - "fmla v12.4s, v15.4s, v9.4s\n" - "ldr s18, [%[inptr0], x23]\n" - "fmla v10.4s, v15.4s, v2.4s\n" - "ldr s3, [%[wbptr], #20]\n" - "fmov v22.4s, #6.0\n" - "add x21, x21, #4\n" - "fmax v5.4s, v5.4s, v20.4s\n" - "ldr s15, [x21]\n" - "fmla v10.4s, v16.4s, v8.4s\n" - "ldr s2, [%[wbptr], #12]\n" - "fmin v5.4s, v5.4s, v22.4s\n" - "ldr s23, [x21, %[input_col_stride1]]\n" - "fmax v12.4s, v12.4s, v20.4s\n" - "add x28, x28, #4\n" - "str s5, [%[outptr0]]\n" - "fmla v10.4s, v17.4s, v7.4s\n" - "fmin v12.4s, v12.4s, v22.4s\n" - "ldr s8, [%[wbptr], #32]\n" - "fmax v11.4s, v11.4s, v20.4s\n" - "ldr s16, [x28]\n" - "str s12, [%[outptr0], 
%[output_col_stride1]]\n" - "fmla v10.4s, v13.4s, v9.4s\n" - "fmin v11.4s, v11.4s, v22.4s\n" - "ldr s7, [%[wbptr], #24]\n" - "mov v5.16b, v14.16b\n" - "ldr s13, [x28, %[input_col_stride1]]\n" - "str s11, [x24]\n" - "fmax v10.4s, v10.4s, v20.4s\n" - "mov v11.16b, v14.16b\n" - "ldr s9, [%[wbptr], #36]\n" - "fmin v10.4s, v10.4s, v22.4s\n" - "add x27, x27, #4\n" - "mov v12.16b, v14.16b\n" - "ldr s17, [x27]\n" - "str s10, [x24, %[output_col_stride1]]\n" - "fmla v5.4s, v19.4s, v0.4s\n" - "mov v10.16b, v14.16b\n" - "add x22, x22, #4\n" - "fmla v11.4s, v16.4s, v0.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v5.4s, v15.4s, v6.4s\n" - "add x24, x24, #4\n" - "subs x19, x19, #1\n" - "fmla v5.4s, v21.4s, v1.4s\n" - "fmla v5.4s, v16.4s, v4.4s\n" - "bne 5b\n" - "6:\n" - "fmla v5.4s, v23.4s, v3.4s\n" - "ldr s21, [x21, x23]\n" - "fmla v12.4s, v18.4s, v0.4s\n" - "ldr s20, [%[inptr0], x26]\n" - "fmla v11.4s, v17.4s, v6.4s\n" - "ldr s19, [x22]\n" - "fmla v5.4s, v18.4s, v2.4s\n" - "ldr s15, [x27, %[input_col_stride1]]\n" - "fmla v12.4s, v21.4s, v6.4s\n" - "ldr s16, [x28, x23]\n" - "fmla v11.4s, v13.4s, v1.4s\n" - "ldr s17, [x21, x26]\n" - "fmla v5.4s, v13.4s, v8.4s\n" - "ldr s14, [%[inptr0], x25]\n" - "fmla v12.4s, v20.4s, v1.4s\n" - "ldr s20, [x22, %[input_col_stride1]]\n" - "fmla v11.4s, v19.4s, v4.4s\n" - "ldr s19, [x27, x23]\n" - "fmla v5.4s, v21.4s, v7.4s\n" - "ldr s22, [x28, x26]\n" - "fmla v12.4s, v16.4s, v4.4s\n" - "ldr s21, [x21, x25]\n" - "fmla v11.4s, v15.4s, v3.4s\n" - "ldr s23, [x22, x23]\n" - "fmla v5.4s, v16.4s, v9.4s\n" - "ldr s18, [x27, x26]\n" - "fmla v10.4s, v16.4s, v0.4s\n" - "ldr s15, [x28, x25]\n" - "fmla v11.4s, v16.4s, v2.4s\n" - "ldr s16, [x22, x26]\n" - "fmla v12.4s, v17.4s, v3.4s\n" - "ldr s17, [x27, x25]\n" - "fmla v10.4s, v19.4s, v6.4s\n" - "ldr s13, [x22, x25]\n" - "fmla v11.4s, v20.4s, v8.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v12.4s, v14.4s, v2.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v10.4s, v22.4s, v1.4s\n" - "add %[inptr0], 
%[inptr0], #4\n" - "fmla v11.4s, v19.4s, v7.4s\n" - "add x21, x21, #4\n" - "fmla v12.4s, v22.4s, v8.4s\n" - "add x28, x28, #4\n" - "fmla v10.4s, v23.4s, v4.4s\n" - "add x27, x27, #4\n" - "fmla v11.4s, v23.4s, v9.4s\n" - "add x22, x22, #4\n" - "fmla v12.4s, v21.4s, v7.4s\n" - "movi v20.16b, #0\n" - "fmla v10.4s, v18.4s, v3.4s\n" - "fmov v22.4s, #6.0\n" - "fmax v5.4s, v5.4s, v20.4s\n" - "fmax v11.4s, v11.4s, v20.4s\n" - "fmla v12.4s, v15.4s, v9.4s\n" - "fmla v10.4s, v15.4s, v2.4s\n" - "fmin v5.4s, v5.4s, v22.4s\n" - "fmin v11.4s, v11.4s, v22.4s\n" - "fmax v12.4s, v12.4s, v20.4s\n" - "str s5, [%[outptr0]]\n" - "str s11, [x24]\n" - "fmla v10.4s, v16.4s, v8.4s\n" - "fmin v12.4s, v12.4s, v22.4s\n" - "str s12, [%[outptr0], %[output_col_stride1]]\n" - "fmla v10.4s, v17.4s, v7.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v10.4s, v13.4s, v9.4s\n" - "fmax v10.4s, v10.4s, v20.4s\n" - "fmin v10.4s, v10.4s, v22.4s\n" - "str s10, [x24, %[output_col_stride1]]\n" - "add x24, x24, #4\n" - "7:\n" - : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr) - : [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU6>( - int n_channels, - const void *weight_bias_ptr, - const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float *outptrs[Base::output_tile_rows][Base::output_tile_cols] -) -{ - __asm __volatile( - "mov x27, xzr\n" - "mov x28, xzr\n" - "and x26, %[n_channels], #3\n" - "lsr x25, 
%[n_channels], #2\n" - "cbz x25, 4f\n" - "1:\n" - "ldr q15, [%[wbptr]]\n" - "ldr x21, [%[inptrs], 0]\n" - "mov v8.16b, v15.16b\n" - "ldr q14, [%[wbptr], #16]\n" - "mov v3.16b, v15.16b\n" - "ldr q10, [%[wbptr], #32]\n" - "mov v2.16b, v15.16b\n" - "ldr q7, [%[wbptr], #48]\n" - "mov v4.16b, v15.16b\n" - "ldr q13, [%[wbptr], #64]\n" - "ldr q5, [%[wbptr], #80]\n" - "ldr x22, [%[inptrs], 40]\n" - "ldr q0, [%[wbptr], #96]\n" - "ldr x20, [%[inptrs], 80]\n" - "ldr q9, [%[wbptr], #112]\n" - "ldr x23, [%[inptrs], 120]\n" - "ldr q6, [%[wbptr], #128]\n" - "subs x25, x25, #1\n" - "ldr q1, [%[wbptr], #144]\n" - "ldr q17, [x21, x27]\n" - "fmla v8.4s, v17.4s, v14.4s\n" - "ldr q18, [x22, x27]\n" - "ldr q16, [x20, x27]\n" - "ldr x21, [%[inptrs], 8]\n" - "ldr q17, [x23, x27]\n" - "ldr x22, [%[inptrs], 48]\n" - "ldr q11, [x21, x27]\n" - "ldr x20, [%[inptrs], 88]\n" - "fmla v8.4s, v18.4s, v13.4s\n" - "ldr q19, [x22, x27]\n" - "ldr q15, [x20, x27]\n" - "ldr x21, [%[inptrs], 16]\n" - "ldr q12, [x21, x27]\n" - "fmla v8.4s, v11.4s, v10.4s\n" - "fmla v8.4s, v16.4s, v9.4s\n" - "beq 3f\n" - "2:\n" - "fmla v3.4s, v16.4s, v14.4s\n" - "ldr x22, [%[inptrs], 56]\n" - "fmla v8.4s, v19.4s, v5.4s\n" - "ldr x21, [%[inptrs], 24]\n" - "fmla v2.4s, v12.4s, v14.4s\n" - "ldr q16, [x22, x27]\n" - "movi v11.16b, #0\n" - "ldr q18, [x21, x27]\n" - "fmla v3.4s, v17.4s, v13.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v8.4s, v12.4s, v7.4s\n" - "ldr x23, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v13.4s\n" - "ldr q19, [x20, x27]\n" - "fmov v12.4s, #6.0\n" - "ldr q17, [x23, x27]\n" - "fmla v3.4s, v15.4s, v10.4s\n" - "ldr x20, [%[inptrs], 96]\n" - "fmla v8.4s, v15.4s, v6.4s\n" - "ldr x22, [%[inptrs], 64]\n" - "fmla v2.4s, v18.4s, v10.4s\n" - "ldr q15, [x20, x27]\n" - "fmla v4.4s, v15.4s, v14.4s\n" - "ldr q18, [x22, x27]\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "ldr x21, [%[inptrs], 32]\n" - "fmla v8.4s, v16.4s, v0.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v2.4s, v15.4s, v9.4s\n" - "ldr q19, [x21, x27]\n" - "ldr 
q16, [x20, x27]\n" - "ldr x23, [%[inptrs], 136]\n" - "fmla v3.4s, v17.4s, v5.4s\n" - "ldr x20, [%[inptrs], 104]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr q14, [x23, x27]\n" - "fmla v2.4s, v18.4s, v5.4s\n" - "ldr q17, [x20, x27]\n" - "fmla v4.4s, v14.4s, v13.4s\n" - "ldr x22, [%[inptrs], 72]\n" - "fmla v3.4s, v15.4s, v7.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmax v8.4s, v8.4s, v11.4s\n" - "ldr q18, [x22, x27]\n" - "fmla v2.4s, v19.4s, v7.4s\n" - "ldr q13, [x20, x27]\n" - "fmla v4.4s, v17.4s, v10.4s\n" - "ldr x23, [%[inptrs], 144]\n" - "fmla v3.4s, v16.4s, v6.4s\n" - "ldr x20, [%[inptrs], 112]\n" - "fmin v8.4s, v8.4s, v12.4s\n" - "ldr q10, [x23, x27]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr q15, [x20, x27]\n" - "fmla v4.4s, v13.4s, v9.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v3.4s, v14.4s, v0.4s\n" - "ldr x23, [%[inptrs], 152]\n" - "ldr q9, [x20, x27]\n" - "ldr x22, [%[outptrs], 0]\n" - "fmla v2.4s, v18.4s, v0.4s\n" - "ldr q19, [x23, x27]\n" - "str q8, [x22, x28]\n" - "fmla v4.4s, v10.4s, v5.4s\n" - "fmla v3.4s, v13.4s, v1.4s\n" - "ldr x20, [%[inptrs], 192]\n" - "ldr x22, [%[outptrs], 8]\n" - "ldr x24, [%[outptrs], 16]\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v2.4s, v15.4s, v1.4s\n" - "ldr q16, [x20, x27]\n" - "fmla v4.4s, v15.4s, v7.4s\n" - "ldr q15, [%[wbptr]]\n" - "fmax v3.4s, v3.4s, v11.4s\n" - "ldr q14, [%[wbptr], #16]\n" - "mov v8.16b, v15.16b\n" - "ldr q10, [%[wbptr], #32]\n" - "fmax v2.4s, v2.4s, v11.4s\n" - "ldr q13, [%[wbptr], #64]\n" - "fmla v4.4s, v9.4s, v6.4s\n" - "ldr q7, [%[wbptr], #48]\n" - "fmin v3.4s, v3.4s, v12.4s\n" - "ldr q5, [%[wbptr], #80]\n" - "fmin v2.4s, v2.4s, v12.4s\n" - "ldr q9, [%[wbptr], #112]\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "add x27, x27, #16\n" - "str q3, [x24, x28]\n" - "fmla v4.4s, v19.4s, v0.4s\n" - "str q2, [x22, x28]\n" - "mov v3.16b, v15.16b\n" - "mov v2.16b, v15.16b\n" - "ldr q6, [%[wbptr], #128]\n" - "ldr x24, [%[outptrs], 24]\n" - "ldr x21, [%[inptrs], 0]\n" - "ldr x22, [%[inptrs], 40]\n" - "fmla 
v4.4s, v16.4s, v1.4s\n" - "ldr q0, [%[wbptr], #96]\n" - "ldr q17, [x21, x27]\n" - "ldr x20, [%[inptrs], 80]\n" - "fmla v8.4s, v17.4s, v14.4s\n" - "ldr q18, [x22, x27]\n" - "ldr q16, [x20, x27]\n" - "ldr x21, [%[inptrs], 8]\n" - "fmax v4.4s, v4.4s, v11.4s\n" - "ldr q1, [%[wbptr], #144]\n" - "ldr q11, [x21, x27]\n" - "ldr x22, [%[inptrs], 48]\n" - "fmla v8.4s, v18.4s, v13.4s\n" - "ldr x21, [%[inptrs], 16]\n" - "fmin v4.4s, v4.4s, v12.4s\n" - "ldr q19, [x22, x27]\n" - "ldr q12, [x21, x27]\n" - "ldr x23, [%[inptrs], 120]\n" - "ldr x20, [%[inptrs], 88]\n" - "subs x25, x25, #1\n" - "str q4, [x24, x28]\n" - "mov v4.16b, v15.16b\n" - "ldr q17, [x23, x27]\n" - "fmla v8.4s, v11.4s, v10.4s\n" - "ldr q15, [x20, x27]\n" - "add x28, x28, #16\n" - "fmla v8.4s, v16.4s, v9.4s\n" - "bne 2b\n" - "3:\n" - "fmla v3.4s, v16.4s, v14.4s\n" - "ldr x22, [%[inptrs], 56]\n" - "fmla v8.4s, v19.4s, v5.4s\n" - "ldr x21, [%[inptrs], 24]\n" - "fmla v2.4s, v12.4s, v14.4s\n" - "ldr q16, [x22, x27]\n" - "movi v11.16b, #0\n" - "ldr q18, [x21, x27]\n" - "fmla v3.4s, v17.4s, v13.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v8.4s, v12.4s, v7.4s\n" - "ldr x23, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v13.4s\n" - "ldr q19, [x20, x27]\n" - "fmov v12.4s, #6.0\n" - "ldr q17, [x23, x27]\n" - "fmla v3.4s, v15.4s, v10.4s\n" - "ldr x20, [%[inptrs], 96]\n" - "fmla v8.4s, v15.4s, v6.4s\n" - "ldr x22, [%[inptrs], 64]\n" - "fmla v2.4s, v18.4s, v10.4s\n" - "ldr q15, [x20, x27]\n" - "fmla v4.4s, v15.4s, v14.4s\n" - "ldr q18, [x22, x27]\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "ldr x21, [%[inptrs], 32]\n" - "fmla v8.4s, v16.4s, v0.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v2.4s, v15.4s, v9.4s\n" - "ldr q19, [x21, x27]\n" - "ldr q16, [x20, x27]\n" - "ldr x23, [%[inptrs], 136]\n" - "fmla v3.4s, v17.4s, v5.4s\n" - "ldr x20, [%[inptrs], 104]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr q14, [x23, x27]\n" - "fmla v2.4s, v18.4s, v5.4s\n" - "ldr q17, [x20, x27]\n" - "fmla v4.4s, v14.4s, v13.4s\n" - "ldr x22, [%[inptrs], 72]\n" 
- "fmla v3.4s, v15.4s, v7.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmax v8.4s, v8.4s, v11.4s\n" - "ldr q18, [x22, x27]\n" - "fmla v2.4s, v19.4s, v7.4s\n" - "ldr q13, [x20, x27]\n" - "fmla v4.4s, v17.4s, v10.4s\n" - "ldr x23, [%[inptrs], 144]\n" - "fmla v3.4s, v16.4s, v6.4s\n" - "ldr x20, [%[inptrs], 112]\n" - "fmin v8.4s, v8.4s, v12.4s\n" - "ldr q10, [x23, x27]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr q15, [x20, x27]\n" - "fmla v4.4s, v13.4s, v9.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v3.4s, v14.4s, v0.4s\n" - "ldr x23, [%[inptrs], 152]\n" - "ldr q9, [x20, x27]\n" - "ldr x22, [%[outptrs], 0]\n" - "fmla v2.4s, v18.4s, v0.4s\n" - "ldr q19, [x23, x27]\n" - "str q8, [x22, x28]\n" - "fmla v4.4s, v10.4s, v5.4s\n" - "fmla v3.4s, v13.4s, v1.4s\n" - "ldr x20, [%[inptrs], 192]\n" - "ldr x22, [%[outptrs], 8]\n" - "ldr x24, [%[outptrs], 16]\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v2.4s, v15.4s, v1.4s\n" - "ldr q16, [x20, x27]\n" - "fmla v4.4s, v15.4s, v7.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmax v3.4s, v3.4s, v11.4s\n" - "add x27, x27, #16\n" - "fmax v2.4s, v2.4s, v11.4s\n" - "fmla v4.4s, v9.4s, v6.4s\n" - "fmin v3.4s, v3.4s, v12.4s\n" - "fmin v2.4s, v2.4s, v12.4s\n" - "str q3, [x24, x28]\n" - "fmla v4.4s, v19.4s, v0.4s\n" - "str q2, [x22, x28]\n" - "ldr x24, [%[outptrs], 24]\n" - "fmla v4.4s, v16.4s, v1.4s\n" - "fmax v4.4s, v4.4s, v11.4s\n" - "fmin v4.4s, v4.4s, v12.4s\n" - "str q4, [x24, x28]\n" - "add x28, x28, #16\n" - "4:\n" - "cbz x26, 7f\n" - "ldr s15, [%[wbptr]]\n" - "mov v8.16b, v15.16b\n" - "ldr s14, [%[wbptr], #4]\n" - "mov v3.16b, v15.16b\n" - "ldr s10, [%[wbptr], #8]\n" - "mov v2.16b, v15.16b\n" - "ldr s7, [%[wbptr], #12]\n" - "mov v4.16b, v15.16b\n" - "ldr s13, [%[wbptr], #16]\n" - "ldr s5, [%[wbptr], #20]\n" - "ldr x21, [%[inptrs], 0]\n" - "ldr s0, [%[wbptr], #24]\n" - "ldr x22, [%[inptrs], 40]\n" - "ldr s9, [%[wbptr], #28]\n" - "ldr x20, [%[inptrs], 80]\n" - "ldr s6, [%[wbptr], #32]\n" - "ldr x23, [%[inptrs], 120]\n" - "ldr s1, [%[wbptr], 
#36]\n" - "subs x26, x26, #1\n" - "ldr s17, [x21, x27]\n" - "ldr s18, [x22, x27]\n" - "fmla v8.4s, v17.4s, v14.4s\n" - "ldr s16, [x20, x27]\n" - "ldr s17, [x23, x27]\n" - "ldr x21, [%[inptrs], 8]\n" - "ldr x22, [%[inptrs], 48]\n" - "ldr x20, [%[inptrs], 88]\n" - "ldr s11, [x21, x27]\n" - "fmla v8.4s, v18.4s, v13.4s\n" - "ldr s19, [x22, x27]\n" - "ldr s15, [x20, x27]\n" - "ldr x21, [%[inptrs], 16]\n" - "ldr s12, [x21, x27]\n" - "fmla v8.4s, v11.4s, v10.4s\n" - "fmla v8.4s, v16.4s, v9.4s\n" - "beq 6f\n" - "5:\n" - "fmla v3.4s, v16.4s, v14.4s\n" - "ldr x22, [%[inptrs], 56]\n" - "fmla v8.4s, v19.4s, v5.4s\n" - "ldr x21, [%[inptrs], 24]\n" - "fmla v2.4s, v12.4s, v14.4s\n" - "ldr s16, [x22, x27]\n" - "movi v11.16b, #0\n" - "ldr s18, [x21, x27]\n" - "fmla v3.4s, v17.4s, v13.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v8.4s, v12.4s, v7.4s\n" - "ldr x23, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v13.4s\n" - "ldr s19, [x20, x27]\n" - "fmov v12.4s, #6.0\n" - "ldr s17, [x23, x27]\n" - "fmla v3.4s, v15.4s, v10.4s\n" - "ldr x20, [%[inptrs], 96]\n" - "fmla v8.4s, v15.4s, v6.4s\n" - "ldr x22, [%[inptrs], 64]\n" - "fmla v2.4s, v18.4s, v10.4s\n" - "ldr s15, [x20, x27]\n" - "fmla v4.4s, v15.4s, v14.4s\n" - "ldr s18, [x22, x27]\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "ldr x21, [%[inptrs], 32]\n" - "fmla v8.4s, v16.4s, v0.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v2.4s, v15.4s, v9.4s\n" - "ldr s19, [x21, x27]\n" - "ldr s16, [x20, x27]\n" - "ldr x23, [%[inptrs], 136]\n" - "fmla v3.4s, v17.4s, v5.4s\n" - "ldr x20, [%[inptrs], 104]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr s14, [x23, x27]\n" - "fmla v2.4s, v18.4s, v5.4s\n" - "ldr s17, [x20, x27]\n" - "fmla v4.4s, v14.4s, v13.4s\n" - "ldr x22, [%[inptrs], 72]\n" - "fmla v3.4s, v15.4s, v7.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmax v8.4s, v8.4s, v11.4s\n" - "ldr s18, [x22, x27]\n" - "fmla v2.4s, v19.4s, v7.4s\n" - "ldr s13, [x20, x27]\n" - "fmla v4.4s, v17.4s, v10.4s\n" - "ldr x23, [%[inptrs], 144]\n" - "fmla v3.4s, v16.4s, v6.4s\n" - 
"ldr x20, [%[inptrs], 112]\n" - "fmin v8.4s, v8.4s, v12.4s\n" - "ldr s10, [x23, x27]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr s15, [x20, x27]\n" - "fmla v4.4s, v13.4s, v9.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v3.4s, v14.4s, v0.4s\n" - "ldr x23, [%[inptrs], 152]\n" - "ldr s9, [x20, x27]\n" - "ldr x22, [%[outptrs], 0]\n" - "fmla v2.4s, v18.4s, v0.4s\n" - "ldr s19, [x23, x27]\n" - "str s8, [x22, x28]\n" - "fmla v4.4s, v10.4s, v5.4s\n" - "fmla v3.4s, v13.4s, v1.4s\n" - "ldr x20, [%[inptrs], 192]\n" - "ldr x22, [%[outptrs], 8]\n" - "ldr x24, [%[outptrs], 16]\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v2.4s, v15.4s, v1.4s\n" - "ldr s16, [x20, x27]\n" - "fmla v4.4s, v15.4s, v7.4s\n" - "ldr s15, [%[wbptr]]\n" - "fmax v3.4s, v3.4s, v11.4s\n" - "ldr s14, [%[wbptr], #4]\n" - "mov v8.16b, v15.16b\n" - "ldr s10, [%[wbptr], #8]\n" - "fmax v2.4s, v2.4s, v11.4s\n" - "ldr s13, [%[wbptr], #16]\n" - "fmla v4.4s, v9.4s, v6.4s\n" - "ldr s7, [%[wbptr], #12]\n" - "fmin v3.4s, v3.4s, v12.4s\n" - "ldr s5, [%[wbptr], #20]\n" - "fmin v2.4s, v2.4s, v12.4s\n" - "ldr s9, [%[wbptr], #28]\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "add x27, x27, #4\n" - "str s3, [x24, x28]\n" - "fmla v4.4s, v19.4s, v0.4s\n" - "str s2, [x22, x28]\n" - "mov v3.16b, v15.16b\n" - "mov v2.16b, v15.16b\n" - "ldr s6, [%[wbptr], #32]\n" - "ldr x24, [%[outptrs], 24]\n" - "ldr x21, [%[inptrs], 0]\n" - "ldr x22, [%[inptrs], 40]\n" - "fmla v4.4s, v16.4s, v1.4s\n" - "ldr s0, [%[wbptr], #24]\n" - "ldr s17, [x21, x27]\n" - "ldr x20, [%[inptrs], 80]\n" - "fmla v8.4s, v17.4s, v14.4s\n" - "ldr s18, [x22, x27]\n" - "ldr s16, [x20, x27]\n" - "ldr x21, [%[inptrs], 8]\n" - "fmax v4.4s, v4.4s, v11.4s\n" - "ldr s1, [%[wbptr], #36]\n" - "ldr s11, [x21, x27]\n" - "ldr x22, [%[inptrs], 48]\n" - "fmla v8.4s, v18.4s, v13.4s\n" - "ldr x21, [%[inptrs], 16]\n" - "fmin v4.4s, v4.4s, v12.4s\n" - "ldr s19, [x22, x27]\n" - "ldr s12, [x21, x27]\n" - "ldr x23, [%[inptrs], 120]\n" - "ldr x20, [%[inptrs], 88]\n" - "subs x26, x26, #1\n" - 
"str s4, [x24, x28]\n" - "mov v4.16b, v15.16b\n" - "ldr s17, [x23, x27]\n" - "fmla v8.4s, v11.4s, v10.4s\n" - "ldr s15, [x20, x27]\n" - "add x28, x28, #4\n" - "fmla v8.4s, v16.4s, v9.4s\n" - "bne 5b\n" - "6:\n" - "fmla v3.4s, v16.4s, v14.4s\n" - "ldr x22, [%[inptrs], 56]\n" - "fmla v8.4s, v19.4s, v5.4s\n" - "ldr x21, [%[inptrs], 24]\n" - "fmla v2.4s, v12.4s, v14.4s\n" - "ldr s16, [x22, x27]\n" - "movi v11.16b, #0\n" - "ldr s18, [x21, x27]\n" - "fmla v3.4s, v17.4s, v13.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v8.4s, v12.4s, v7.4s\n" - "ldr x23, [%[inptrs], 128]\n" - "fmla v2.4s, v16.4s, v13.4s\n" - "ldr s19, [x20, x27]\n" - "fmov v12.4s, #6.0\n" - "ldr s17, [x23, x27]\n" - "fmla v3.4s, v15.4s, v10.4s\n" - "ldr x20, [%[inptrs], 96]\n" - "fmla v8.4s, v15.4s, v6.4s\n" - "ldr x22, [%[inptrs], 64]\n" - "fmla v2.4s, v18.4s, v10.4s\n" - "ldr s15, [x20, x27]\n" - "fmla v4.4s, v15.4s, v14.4s\n" - "ldr s18, [x22, x27]\n" - "fmla v3.4s, v19.4s, v9.4s\n" - "ldr x21, [%[inptrs], 32]\n" - "fmla v8.4s, v16.4s, v0.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v2.4s, v15.4s, v9.4s\n" - "ldr s19, [x21, x27]\n" - "ldr s16, [x20, x27]\n" - "ldr x23, [%[inptrs], 136]\n" - "fmla v3.4s, v17.4s, v5.4s\n" - "ldr x20, [%[inptrs], 104]\n" - "fmla v8.4s, v15.4s, v1.4s\n" - "ldr s14, [x23, x27]\n" - "fmla v2.4s, v18.4s, v5.4s\n" - "ldr s17, [x20, x27]\n" - "fmla v4.4s, v14.4s, v13.4s\n" - "ldr x22, [%[inptrs], 72]\n" - "fmla v3.4s, v15.4s, v7.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmax v8.4s, v8.4s, v11.4s\n" - "ldr s18, [x22, x27]\n" - "fmla v2.4s, v19.4s, v7.4s\n" - "ldr s13, [x20, x27]\n" - "fmla v4.4s, v17.4s, v10.4s\n" - "ldr x23, [%[inptrs], 144]\n" - "fmla v3.4s, v16.4s, v6.4s\n" - "ldr x20, [%[inptrs], 112]\n" - "fmin v8.4s, v8.4s, v12.4s\n" - "ldr s10, [x23, x27]\n" - "fmla v2.4s, v17.4s, v6.4s\n" - "ldr s15, [x20, x27]\n" - "fmla v4.4s, v13.4s, v9.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v3.4s, v14.4s, v0.4s\n" - "ldr x23, [%[inptrs], 152]\n" - "ldr s9, [x20, x27]\n" - 
"ldr x22, [%[outptrs], 0]\n" - "fmla v2.4s, v18.4s, v0.4s\n" - "ldr s19, [x23, x27]\n" - "str s8, [x22, x28]\n" - "fmla v4.4s, v10.4s, v5.4s\n" - "fmla v3.4s, v13.4s, v1.4s\n" - "ldr x20, [%[inptrs], 192]\n" - "ldr x22, [%[outptrs], 8]\n" - "ldr x24, [%[outptrs], 16]\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v2.4s, v15.4s, v1.4s\n" - "ldr s16, [x20, x27]\n" - "fmla v4.4s, v15.4s, v7.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmax v3.4s, v3.4s, v11.4s\n" - "add x27, x27, #4\n" - "fmax v2.4s, v2.4s, v11.4s\n" - "fmla v4.4s, v9.4s, v6.4s\n" - "fmin v3.4s, v3.4s, v12.4s\n" - "fmin v2.4s, v2.4s, v12.4s\n" - "str s3, [x24, x28]\n" - "fmla v4.4s, v19.4s, v0.4s\n" - "str s2, [x22, x28]\n" - "ldr x24, [%[outptrs], 24]\n" - "fmla v4.4s, v16.4s, v1.4s\n" - "fmax v4.4s, v4.4s, v11.4s\n" - "fmin v4.4s, v4.4s, v12.4s\n" - "str s4, [x24, x28]\n" - "add x28, x28, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr) - : [inptrs] "r" (inptrs), [outptrs] "r" (outptrs), [n_channels] "r" ((long) n_channels) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -#endif // __aarch64__ - -template class DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp deleted file mode 100644 index 2142c431ac..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_1x1_fp32_fp32.cpp +++ /dev/null @@ -1,2341 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "impl_fp32_fp32.hpp" - -namespace depthwise -{ - -using namespace neon_convolution_kernels; -using Conv = DepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float, float>; - -#ifdef __aarch64__ -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x20, %[inptr0], %[input_row_stride]\n" - "add x13, %[input_col_stride1], %[input_col_stride1]\n" - "add x24, %[outptr0], %[output_row_stride]\n" - "add x21, x20, %[input_row_stride]\n" - "add x14, x13, #64\n" - "add x15, x13, %[input_col_stride1]\n" - "add x22, x21, %[input_row_stride]\n" - "add x16, x15, #64\n" - "add x17, x15, %[input_col_stride1]\n" - "add x23, x22, %[input_row_stride]\n" - "add x9, x17, #64\n" - "add x25, x24, %[output_row_stride]\n" - "add x26, %[output_col_stride1], %[output_col_stride1]\n" - "and x27, %[n_channels], #3\n" - "lsr x28, %[n_channels], #2\n" - "cbz x28, 4f\n" - "1:\n" - "ldr q25, [%[wbptr]]\n" - "subs x28, x28, #1\n" - "mov v17.16b, v25.16b\n" - "ldr q16, [%[wbptr], #16]\n" - "mov v13.16b, v25.16b\n" - "ldr q7, [%[wbptr], #32]\n" - "mov v15.16b, v25.16b\n" - "ldr q6, [%[wbptr], #48]\n" - "mov v10.16b, v25.16b\n" - "ldr q5, [%[wbptr], #64]\n" - "mov v12.16b, v25.16b\n" - "ldr q4, [%[wbptr], #80]\n" - "mov v14.16b, v25.16b\n" - "ldr q3, [%[wbptr], #96]\n" - "mov v9.16b, v25.16b\n" - "ldr q2, [%[wbptr], #112]\n" - "mov v11.16b, v25.16b\n" - "ldr q1, [%[wbptr], #128]\n" - "mov v8.16b, v25.16b\n" - "ldr q0, [%[wbptr], #144]\n" - "ldr q26, [%[inptr0]]\n" - "ldr q28, [x20]\n" - "fmla v17.4s, v26.4s, v16.4s\n" - "ldr q29, [%[inptr0], %[input_col_stride1]]\n" - "fmla v13.4s, v28.4s, v16.4s\n" - "ldr q27, [x21]\n" - "fmla v15.4s, v29.4s, v16.4s\n" - "ldr q21, [x20, %[input_col_stride1]]\n" 
- "fmla v17.4s, v28.4s, v5.4s\n" - "ldr q20, [%[inptr0], x13]\n" - "ldr q23, [x22]\n" - "ldr q19, [x21, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x20, #64]\n" - "fmla v17.4s, v29.4s, v7.4s\n" - "prfm pldl1keep, [%[inptr0], x19]\n" - "prfm pldl1keep, [x21, #64]\n" - "prfm pldl1keep, [x20, x19]\n" - "prfm pldl1keep, [%[inptr0], x14]\n" - "prfm pldl1keep, [x22, #64]\n" - "prfm pldl1keep, [x21, x19]\n" - "beq 3f\n" - "2:\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "ldr q30, [x20, x13]\n" - "fmla v13.4s, v27.4s, v5.4s\n" - "ldr q29, [%[inptr0], x15]\n" - "fmla v10.4s, v27.4s, v16.4s\n" - "ldr q28, [x23]\n" - "fmla v17.4s, v21.4s, v4.4s\n" - "ldr q24, [x22, %[input_col_stride1]]\n" - "fmla v13.4s, v21.4s, v7.4s\n" - "ldr q18, [x21, x13]\n" - "fmla v15.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [x20, x14]\n" - "fmla v12.4s, v21.4s, v16.4s\n" - "ldr q22, [x20, x15]\n" - "fmla v17.4s, v20.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v15.4s, v20.4s, v7.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v14.4s, v20.4s, v16.4s\n" - "ldr q25, [%[inptr0], x17]\n" - "fmla v13.4s, v23.4s, v2.4s\n" - "prfm pldl1keep, [x22, x19]\n" - "fmla v10.4s, v23.4s, v5.4s\n" - "ldr q26, [x23, %[input_col_stride1]]\n" - "fmla v17.4s, v19.4s, v1.4s\n" - "prfm pldl1keep, [x21, x14]\n" - "fmla v13.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v15.4s, v19.4s, v2.4s\n" - "prfm pldl1keep, [%[inptr0], x9]\n" - "fmla v10.4s, v19.4s, v7.4s\n" - "prfm pldl1keep, [x23, x19]\n" - "fmla v12.4s, v19.4s, v5.4s\n" - "prfm pldl1keep, [x22, x14]\n" - "fmla v9.4s, v19.4s, v16.4s\n" - "ldr q27, [x22, x13]\n" - "fmla v17.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x21, x16]\n" - "fmla v13.4s, v30.4s, v6.4s\n" - "prfm pldl1keep, [x20, x9]\n" - "fmla v15.4s, v30.4s, v4.4s\n" - "prfm pldl1keep, [x23, x14]\n" - "fmla v12.4s, v30.4s, v7.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x21, x9]\n" - "fmla v11.4s, v30.4s, 
v16.4s\n" - "ldr q21, [x21, x15]\n" - "fmla v15.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "ldr q20, [x20, x17]\n" - "fmla v10.4s, v28.4s, v2.4s\n" - "ldr q19, [x23, x13]\n" - "fmla v13.4s, v24.4s, v1.4s\n" - "prfm pldl1keep, [x22, x9]\n" - "fmla v12.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x9]\n" - "fmla v10.4s, v24.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v9.4s, v24.4s, v5.4s\n" - "ldr q23, [x22, x15]\n" - "fmla v17.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v13.4s, v18.4s, v3.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v15.4s, v18.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "str q17, [%[outptr0]]\n" - "fmla v10.4s, v18.4s, v6.4s\n" - "fmla v12.4s, v18.4s, v4.4s\n" - "ldr q17, [x21, x17]\n" - "fmla v14.4s, v18.4s, v2.4s\n" - "prfm pldl1keep, [%[inptr0], x19]\n" - "fmla v9.4s, v18.4s, v7.4s\n" - "prfm pldl1keep, [%[inptr0], x14]\n" - "fmla v11.4s, v18.4s, v5.4s\n" - "add x20, x20, #16\n" - "fmla v8.4s, v18.4s, v16.4s\n" - "ldr q24, [x23, x15]\n" - "fmla v15.4s, v22.4s, v3.4s\n" - "ldr q18, [x22, x17]\n" - "fmla v12.4s, v22.4s, v6.4s\n" - "prfm pldl1keep, [x20, #64]\n" - "fmla v14.4s, v22.4s, v4.4s\n" - "prfm pldl1keep, [x20, x19]\n" - "fmla v11.4s, v22.4s, v7.4s\n" - "ldr q22, [x23, x17]\n" - "fmla v10.4s, v26.4s, v1.4s\n" - "add x21, x21, #16\n" - "fmla v14.4s, v25.4s, v6.4s\n" - "ldr q25, [%[wbptr]]\n" - "fmla v9.4s, v26.4s, v2.4s\n" - "ldr q16, [%[wbptr], #16]\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "prfm pldl1keep, [x21, #64]\n" - "fmla v10.4s, v27.4s, v3.4s\n" - "prfm pldl1keep, [x21, x19]\n" - "fmla v12.4s, v27.4s, v1.4s\n" - "add x22, x22, #16\n" - "str q13, [x24]\n" - "fmla v9.4s, v27.4s, v4.4s\n" - "fmla v11.4s, v27.4s, v2.4s\n" - "ldr q26, [%[inptr0]]\n" - "fmla v8.4s, v27.4s, v5.4s\n" - "ldr q28, [x20]\n" - "fmla v15.4s, v21.4s, v0.4s\n" - "ldr q29, [%[inptr0], %[input_col_stride1]]\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "prfm pldl1keep, [x22, #64]\n" 
- "fmla v14.4s, v21.4s, v1.4s\n" - "add x23, x23, #16\n" - "str q15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "ldr q5, [%[wbptr], #64]\n" - "fmla v8.4s, v21.4s, v7.4s\n" - "ldr q27, [x21]\n" - "fmla v14.4s, v20.4s, v3.4s\n" - "ldr q21, [x20, %[input_col_stride1]]\n" - "fmla v11.4s, v20.4s, v6.4s\n" - "ldr q20, [%[inptr0], x13]\n" - "fmla v10.4s, v19.4s, v0.4s\n" - "subs x28, x28, #1\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "fmla v8.4s, v19.4s, v2.4s\n" - "fmla v12.4s, v23.4s, v0.4s\n" - "ldr q7, [%[wbptr], #32]\n" - "str q10, [x25]\n" - "fmla v11.4s, v23.4s, v1.4s\n" - "fmla v9.4s, v23.4s, v3.4s\n" - "ldr q2, [%[wbptr], #112]\n" - "str q12, [x24, %[output_col_stride1]]\n" - "fmla v8.4s, v23.4s, v4.4s\n" - "fmla v14.4s, v17.4s, v0.4s\n" - "ldr q23, [x22]\n" - "fmla v11.4s, v17.4s, v3.4s\n" - "ldr q19, [x21, %[input_col_stride1]]\n" - "fmla v8.4s, v17.4s, v6.4s\n" - "ldr q4, [%[wbptr], #80]\n" - "str q14, [%[outptr0], x26]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "fmla v11.4s, v18.4s, v0.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v8.4s, v24.4s, v1.4s\n" - "ldr q6, [%[wbptr], #48]\n" - "str q9, [x25, %[output_col_stride1]]\n" - "mov v17.16b, v25.16b\n" - "str q11, [x24, x26]\n" - "mov v13.16b, v25.16b\n" - "fmla v8.4s, v18.4s, v3.4s\n" - "ldr q1, [%[wbptr], #128]\n" - "mov v15.16b, v25.16b\n" - "add x24, x24, #16\n" - "mov v10.16b, v25.16b\n" - "mov v12.16b, v25.16b\n" - "fmla v8.4s, v22.4s, v0.4s\n" - "ldr q3, [%[wbptr], #96]\n" - "mov v14.16b, v25.16b\n" - "mov v9.16b, v25.16b\n" - "mov v11.16b, v25.16b\n" - "fmla v17.4s, v26.4s, v16.4s\n" - "str q8, [x25, x26]\n" - "fmla v13.4s, v28.4s, v16.4s\n" - "mov v8.16b, v25.16b\n" - "ldr q0, [%[wbptr], #144]\n" - "fmla v17.4s, v28.4s, v5.4s\n" - "fmla v15.4s, v29.4s, v16.4s\n" - "add x25, x25, #16\n" - "fmla v17.4s, v29.4s, v7.4s\n" - "bne 2b\n" - "3:\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "ldr q30, [x20, x13]\n" - "fmla v13.4s, v27.4s, v5.4s\n" - "ldr q29, 
[%[inptr0], x15]\n" - "fmla v10.4s, v27.4s, v16.4s\n" - "ldr q28, [x23]\n" - "fmla v17.4s, v21.4s, v4.4s\n" - "ldr q24, [x22, %[input_col_stride1]]\n" - "fmla v13.4s, v21.4s, v7.4s\n" - "ldr q18, [x21, x13]\n" - "fmla v15.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [x20, x14]\n" - "fmla v12.4s, v21.4s, v16.4s\n" - "ldr q22, [x20, x15]\n" - "fmla v17.4s, v20.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v15.4s, v20.4s, v7.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v14.4s, v20.4s, v16.4s\n" - "ldr q25, [%[inptr0], x17]\n" - "fmla v13.4s, v23.4s, v2.4s\n" - "prfm pldl1keep, [x22, x19]\n" - "fmla v10.4s, v23.4s, v5.4s\n" - "ldr q26, [x23, %[input_col_stride1]]\n" - "fmla v17.4s, v19.4s, v1.4s\n" - "prfm pldl1keep, [x21, x14]\n" - "fmla v13.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v15.4s, v19.4s, v2.4s\n" - "prfm pldl1keep, [%[inptr0], x9]\n" - "fmla v10.4s, v19.4s, v7.4s\n" - "prfm pldl1keep, [x23, x19]\n" - "fmla v12.4s, v19.4s, v5.4s\n" - "prfm pldl1keep, [x22, x14]\n" - "fmla v9.4s, v19.4s, v16.4s\n" - "ldr q27, [x22, x13]\n" - "fmla v17.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x21, x16]\n" - "fmla v13.4s, v30.4s, v6.4s\n" - "prfm pldl1keep, [x20, x9]\n" - "fmla v15.4s, v30.4s, v4.4s\n" - "prfm pldl1keep, [x23, x14]\n" - "fmla v12.4s, v30.4s, v7.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x21, x9]\n" - "fmla v11.4s, v30.4s, v16.4s\n" - "ldr q21, [x21, x15]\n" - "fmla v15.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "ldr q20, [x20, x17]\n" - "fmla v10.4s, v28.4s, v2.4s\n" - "ldr q19, [x23, x13]\n" - "fmla v13.4s, v24.4s, v1.4s\n" - "prfm pldl1keep, [x22, x9]\n" - "fmla v12.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x9]\n" - "fmla v10.4s, v24.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v9.4s, v24.4s, v5.4s\n" - "ldr q23, [x22, x15]\n" - "fmla v17.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v13.4s, v18.4s, 
v3.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v15.4s, v18.4s, v1.4s\n" - "add x20, x20, #16\n" - "str q17, [%[outptr0]]\n" - "fmla v10.4s, v18.4s, v6.4s\n" - "fmla v12.4s, v18.4s, v4.4s\n" - "ldr q17, [x21, x17]\n" - "fmla v14.4s, v18.4s, v2.4s\n" - "add x21, x21, #16\n" - "fmla v9.4s, v18.4s, v7.4s\n" - "fmla v11.4s, v18.4s, v5.4s\n" - "fmla v8.4s, v18.4s, v16.4s\n" - "ldr q24, [x23, x15]\n" - "fmla v15.4s, v22.4s, v3.4s\n" - "ldr q18, [x22, x17]\n" - "fmla v12.4s, v22.4s, v6.4s\n" - "add x22, x22, #16\n" - "fmla v14.4s, v22.4s, v4.4s\n" - "fmla v11.4s, v22.4s, v7.4s\n" - "fmla v10.4s, v26.4s, v1.4s\n" - "ldr q22, [x23, x17]\n" - "fmla v9.4s, v26.4s, v2.4s\n" - "add x23, x23, #16\n" - "fmla v14.4s, v25.4s, v6.4s\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "fmla v10.4s, v27.4s, v3.4s\n" - "fmla v12.4s, v27.4s, v1.4s\n" - "fmla v9.4s, v27.4s, v4.4s\n" - "fmla v11.4s, v27.4s, v2.4s\n" - "str q13, [x24]\n" - "fmla v8.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v21.4s, v0.4s\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "fmla v14.4s, v21.4s, v1.4s\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "fmla v8.4s, v21.4s, v7.4s\n" - "str q15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v10.4s, v19.4s, v0.4s\n" - "fmla v14.4s, v20.4s, v3.4s\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "fmla v11.4s, v20.4s, v6.4s\n" - "fmla v8.4s, v19.4s, v2.4s\n" - "str q10, [x25]\n" - "fmla v12.4s, v23.4s, v0.4s\n" - "fmla v9.4s, v23.4s, v3.4s\n" - "fmla v14.4s, v17.4s, v0.4s\n" - "fmla v11.4s, v23.4s, v1.4s\n" - "fmla v8.4s, v23.4s, v4.4s\n" - "str q12, [x24, %[output_col_stride1]]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "str q14, [%[outptr0], x26]\n" - "fmla v11.4s, v17.4s, v3.4s\n" - "fmla v8.4s, v17.4s, v6.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "str q9, [x25, %[output_col_stride1]]\n" - "fmla v11.4s, v18.4s, v0.4s\n" - "fmla v8.4s, v24.4s, v1.4s\n" - "str q11, [x24, x26]\n" - "fmla v8.4s, v18.4s, v3.4s\n" - "add x24, x24, #16\n" - "fmla v8.4s, v22.4s, v0.4s\n" - "str q8, [x25, 
x26]\n" - "add x25, x25, #16\n" - "4:\n" - "cbz x27, 7f\n" - "ldr s25, [%[wbptr]]\n" - "mov v17.16b, v25.16b\n" - "ldr s16, [%[wbptr], #4]\n" - "mov v13.16b, v25.16b\n" - "ldr s7, [%[wbptr], #8]\n" - "mov v15.16b, v25.16b\n" - "ldr s6, [%[wbptr], #12]\n" - "mov v10.16b, v25.16b\n" - "ldr s5, [%[wbptr], #16]\n" - "mov v12.16b, v25.16b\n" - "ldr s4, [%[wbptr], #20]\n" - "mov v14.16b, v25.16b\n" - "ldr s3, [%[wbptr], #24]\n" - "mov v9.16b, v25.16b\n" - "ldr s2, [%[wbptr], #28]\n" - "mov v11.16b, v25.16b\n" - "ldr s1, [%[wbptr], #32]\n" - "mov v8.16b, v25.16b\n" - "ldr s0, [%[wbptr], #36]\n" - "ldr s26, [%[inptr0]]\n" - "subs x27, x27, #1\n" - "fmla v17.4s, v26.4s, v16.4s\n" - "ldr s28, [x20]\n" - "fmla v13.4s, v28.4s, v16.4s\n" - "ldr s29, [%[inptr0], %[input_col_stride1]]\n" - "fmla v15.4s, v29.4s, v16.4s\n" - "ldr s27, [x21]\n" - "fmla v17.4s, v28.4s, v5.4s\n" - "ldr s21, [x20, %[input_col_stride1]]\n" - "ldr s20, [%[inptr0], x13]\n" - "ldr s23, [x22]\n" - "ldr s19, [x21, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v17.4s, v29.4s, v7.4s\n" - "prfm pldl1keep, [x20, #64]\n" - "prfm pldl1keep, [%[inptr0], x19]\n" - "prfm pldl1keep, [x21, #64]\n" - "prfm pldl1keep, [x20, x19]\n" - "prfm pldl1keep, [%[inptr0], x14]\n" - "prfm pldl1keep, [x22, #64]\n" - "prfm pldl1keep, [x21, x19]\n" - "beq 6f\n" - "5:\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "ldr s30, [x20, x13]\n" - "fmla v13.4s, v27.4s, v5.4s\n" - "ldr s29, [%[inptr0], x15]\n" - "fmla v10.4s, v27.4s, v16.4s\n" - "ldr s28, [x23]\n" - "fmla v17.4s, v21.4s, v4.4s\n" - "ldr s24, [x22, %[input_col_stride1]]\n" - "fmla v13.4s, v21.4s, v7.4s\n" - "ldr s18, [x21, x13]\n" - "fmla v15.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [x20, x14]\n" - "fmla v12.4s, v21.4s, v16.4s\n" - "ldr s22, [x20, x15]\n" - "fmla v17.4s, v20.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v15.4s, v20.4s, v7.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v14.4s, v20.4s, v16.4s\n" - "ldr s25, [%[inptr0], x17]\n" - "fmla 
v13.4s, v23.4s, v2.4s\n" - "prfm pldl1keep, [x22, x19]\n" - "fmla v10.4s, v23.4s, v5.4s\n" - "ldr s26, [x23, %[input_col_stride1]]\n" - "fmla v17.4s, v19.4s, v1.4s\n" - "prfm pldl1keep, [x21, x14]\n" - "fmla v13.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v15.4s, v19.4s, v2.4s\n" - "prfm pldl1keep, [%[inptr0], x9]\n" - "fmla v10.4s, v19.4s, v7.4s\n" - "prfm pldl1keep, [x23, x19]\n" - "fmla v12.4s, v19.4s, v5.4s\n" - "prfm pldl1keep, [x22, x14]\n" - "fmla v9.4s, v19.4s, v16.4s\n" - "ldr s27, [x22, x13]\n" - "fmla v17.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x21, x16]\n" - "fmla v13.4s, v30.4s, v6.4s\n" - "prfm pldl1keep, [x20, x9]\n" - "fmla v15.4s, v30.4s, v4.4s\n" - "prfm pldl1keep, [x23, x14]\n" - "fmla v12.4s, v30.4s, v7.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x21, x9]\n" - "fmla v11.4s, v30.4s, v16.4s\n" - "ldr s21, [x21, x15]\n" - "fmla v15.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "ldr s20, [x20, x17]\n" - "fmla v10.4s, v28.4s, v2.4s\n" - "ldr s19, [x23, x13]\n" - "fmla v13.4s, v24.4s, v1.4s\n" - "prfm pldl1keep, [x22, x9]\n" - "fmla v12.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x9]\n" - "fmla v10.4s, v24.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v9.4s, v24.4s, v5.4s\n" - "ldr s23, [x22, x15]\n" - "fmla v17.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v13.4s, v18.4s, v3.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v15.4s, v18.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "str s17, [%[outptr0]]\n" - "fmla v10.4s, v18.4s, v6.4s\n" - "fmla v12.4s, v18.4s, v4.4s\n" - "ldr s17, [x21, x17]\n" - "fmla v14.4s, v18.4s, v2.4s\n" - "prfm pldl1keep, [%[inptr0], x19]\n" - "fmla v9.4s, v18.4s, v7.4s\n" - "prfm pldl1keep, [%[inptr0], x14]\n" - "fmla v11.4s, v18.4s, v5.4s\n" - "add x20, x20, #4\n" - "fmla v8.4s, v18.4s, v16.4s\n" - "ldr s24, [x23, x15]\n" - "fmla v15.4s, v22.4s, v3.4s\n" - "ldr s18, [x22, 
x17]\n" - "fmla v12.4s, v22.4s, v6.4s\n" - "prfm pldl1keep, [x20, #64]\n" - "fmla v14.4s, v22.4s, v4.4s\n" - "prfm pldl1keep, [x20, x19]\n" - "fmla v11.4s, v22.4s, v7.4s\n" - "ldr s22, [x23, x17]\n" - "fmla v10.4s, v26.4s, v1.4s\n" - "add x21, x21, #4\n" - "fmla v14.4s, v25.4s, v6.4s\n" - "ldr s25, [%[wbptr]]\n" - "fmla v9.4s, v26.4s, v2.4s\n" - "ldr s16, [%[wbptr], #4]\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "prfm pldl1keep, [x21, #64]\n" - "fmla v10.4s, v27.4s, v3.4s\n" - "prfm pldl1keep, [x21, x19]\n" - "fmla v12.4s, v27.4s, v1.4s\n" - "add x22, x22, #4\n" - "str s13, [x24]\n" - "fmla v9.4s, v27.4s, v4.4s\n" - "fmla v11.4s, v27.4s, v2.4s\n" - "ldr s26, [%[inptr0]]\n" - "fmla v8.4s, v27.4s, v5.4s\n" - "ldr s28, [x20]\n" - "fmla v15.4s, v21.4s, v0.4s\n" - "ldr s29, [%[inptr0], %[input_col_stride1]]\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v14.4s, v21.4s, v1.4s\n" - "add x23, x23, #4\n" - "str s15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "ldr s5, [%[wbptr], #16]\n" - "fmla v8.4s, v21.4s, v7.4s\n" - "ldr s27, [x21]\n" - "fmla v14.4s, v20.4s, v3.4s\n" - "ldr s21, [x20, %[input_col_stride1]]\n" - "fmla v11.4s, v20.4s, v6.4s\n" - "ldr s20, [%[inptr0], x13]\n" - "fmla v10.4s, v19.4s, v0.4s\n" - "subs x27, x27, #1\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "fmla v8.4s, v19.4s, v2.4s\n" - "fmla v12.4s, v23.4s, v0.4s\n" - "ldr s7, [%[wbptr], #8]\n" - "str s10, [x25]\n" - "fmla v11.4s, v23.4s, v1.4s\n" - "fmla v9.4s, v23.4s, v3.4s\n" - "ldr s2, [%[wbptr], #28]\n" - "str s12, [x24, %[output_col_stride1]]\n" - "fmla v8.4s, v23.4s, v4.4s\n" - "fmla v14.4s, v17.4s, v0.4s\n" - "ldr s23, [x22]\n" - "fmla v11.4s, v17.4s, v3.4s\n" - "ldr s19, [x21, %[input_col_stride1]]\n" - "fmla v8.4s, v17.4s, v6.4s\n" - "ldr s4, [%[wbptr], #20]\n" - "str s14, [%[outptr0], x26]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "fmla v11.4s, v18.4s, v0.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v8.4s, v24.4s, 
v1.4s\n" - "ldr s6, [%[wbptr], #12]\n" - "str s9, [x25, %[output_col_stride1]]\n" - "mov v17.16b, v25.16b\n" - "str s11, [x24, x26]\n" - "mov v13.16b, v25.16b\n" - "fmla v8.4s, v18.4s, v3.4s\n" - "ldr s1, [%[wbptr], #32]\n" - "mov v15.16b, v25.16b\n" - "add x24, x24, #4\n" - "mov v10.16b, v25.16b\n" - "mov v12.16b, v25.16b\n" - "fmla v8.4s, v22.4s, v0.4s\n" - "ldr s3, [%[wbptr], #24]\n" - "mov v14.16b, v25.16b\n" - "mov v9.16b, v25.16b\n" - "mov v11.16b, v25.16b\n" - "fmla v17.4s, v26.4s, v16.4s\n" - "str s8, [x25, x26]\n" - "fmla v13.4s, v28.4s, v16.4s\n" - "mov v8.16b, v25.16b\n" - "ldr s0, [%[wbptr], #36]\n" - "fmla v17.4s, v28.4s, v5.4s\n" - "fmla v15.4s, v29.4s, v16.4s\n" - "add x25, x25, #4\n" - "fmla v17.4s, v29.4s, v7.4s\n" - "bne 5b\n" - "6:\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "ldr s30, [x20, x13]\n" - "fmla v13.4s, v27.4s, v5.4s\n" - "ldr s29, [%[inptr0], x15]\n" - "fmla v10.4s, v27.4s, v16.4s\n" - "ldr s28, [x23]\n" - "fmla v17.4s, v21.4s, v4.4s\n" - "ldr s24, [x22, %[input_col_stride1]]\n" - "fmla v13.4s, v21.4s, v7.4s\n" - "ldr s18, [x21, x13]\n" - "fmla v15.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [x20, x14]\n" - "fmla v12.4s, v21.4s, v16.4s\n" - "ldr s22, [x20, x15]\n" - "fmla v17.4s, v20.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v15.4s, v20.4s, v7.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v14.4s, v20.4s, v16.4s\n" - "ldr s25, [%[inptr0], x17]\n" - "fmla v13.4s, v23.4s, v2.4s\n" - "prfm pldl1keep, [x22, x19]\n" - "fmla v10.4s, v23.4s, v5.4s\n" - "ldr s26, [x23, %[input_col_stride1]]\n" - "fmla v17.4s, v19.4s, v1.4s\n" - "prfm pldl1keep, [x21, x14]\n" - "fmla v13.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v15.4s, v19.4s, v2.4s\n" - "prfm pldl1keep, [%[inptr0], x9]\n" - "fmla v10.4s, v19.4s, v7.4s\n" - "prfm pldl1keep, [x23, x19]\n" - "fmla v12.4s, v19.4s, v5.4s\n" - "prfm pldl1keep, [x22, x14]\n" - "fmla v9.4s, v19.4s, v16.4s\n" - "ldr s27, [x22, x13]\n" - "fmla v17.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, 
[x21, x16]\n" - "fmla v13.4s, v30.4s, v6.4s\n" - "prfm pldl1keep, [x20, x9]\n" - "fmla v15.4s, v30.4s, v4.4s\n" - "prfm pldl1keep, [x23, x14]\n" - "fmla v12.4s, v30.4s, v7.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x21, x9]\n" - "fmla v11.4s, v30.4s, v16.4s\n" - "ldr s21, [x21, x15]\n" - "fmla v15.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "ldr s20, [x20, x17]\n" - "fmla v10.4s, v28.4s, v2.4s\n" - "ldr s19, [x23, x13]\n" - "fmla v13.4s, v24.4s, v1.4s\n" - "prfm pldl1keep, [x22, x9]\n" - "fmla v12.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x9]\n" - "fmla v10.4s, v24.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v9.4s, v24.4s, v5.4s\n" - "ldr s23, [x22, x15]\n" - "fmla v17.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v13.4s, v18.4s, v3.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v15.4s, v18.4s, v1.4s\n" - "add x20, x20, #4\n" - "str s17, [%[outptr0]]\n" - "fmla v10.4s, v18.4s, v6.4s\n" - "fmla v12.4s, v18.4s, v4.4s\n" - "ldr s17, [x21, x17]\n" - "fmla v14.4s, v18.4s, v2.4s\n" - "add x21, x21, #4\n" - "fmla v9.4s, v18.4s, v7.4s\n" - "fmla v11.4s, v18.4s, v5.4s\n" - "fmla v8.4s, v18.4s, v16.4s\n" - "ldr s24, [x23, x15]\n" - "fmla v15.4s, v22.4s, v3.4s\n" - "ldr s18, [x22, x17]\n" - "fmla v12.4s, v22.4s, v6.4s\n" - "add x22, x22, #4\n" - "fmla v14.4s, v22.4s, v4.4s\n" - "fmla v11.4s, v22.4s, v7.4s\n" - "fmla v10.4s, v26.4s, v1.4s\n" - "ldr s22, [x23, x17]\n" - "fmla v9.4s, v26.4s, v2.4s\n" - "add x23, x23, #4\n" - "fmla v14.4s, v25.4s, v6.4s\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "fmla v10.4s, v27.4s, v3.4s\n" - "fmla v12.4s, v27.4s, v1.4s\n" - "fmla v9.4s, v27.4s, v4.4s\n" - "fmla v11.4s, v27.4s, v2.4s\n" - "str s13, [x24]\n" - "fmla v8.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v21.4s, v0.4s\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "fmla v14.4s, v21.4s, v1.4s\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "fmla v8.4s, 
v21.4s, v7.4s\n" - "str s15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v10.4s, v19.4s, v0.4s\n" - "fmla v14.4s, v20.4s, v3.4s\n" - "fmla v9.4s, v19.4s, v1.4s\n" - "fmla v11.4s, v20.4s, v6.4s\n" - "fmla v8.4s, v19.4s, v2.4s\n" - "str s10, [x25]\n" - "fmla v12.4s, v23.4s, v0.4s\n" - "fmla v9.4s, v23.4s, v3.4s\n" - "fmla v14.4s, v17.4s, v0.4s\n" - "fmla v11.4s, v23.4s, v1.4s\n" - "fmla v8.4s, v23.4s, v4.4s\n" - "str s12, [x24, %[output_col_stride1]]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "str s14, [%[outptr0], x26]\n" - "fmla v11.4s, v17.4s, v3.4s\n" - "fmla v8.4s, v17.4s, v6.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "str s9, [x25, %[output_col_stride1]]\n" - "fmla v11.4s, v18.4s, v0.4s\n" - "fmla v8.4s, v24.4s, v1.4s\n" - "str s11, [x24, x26]\n" - "fmla v8.4s, v18.4s, v3.4s\n" - "add x24, x24, #4\n" - "fmla v8.4s, v22.4s, v0.4s\n" - "str s8, [x25, x26]\n" - "add x25, x25, #4\n" - "7:\n" - : [outptr0] "+r" (output), [inptr0] "+r" (input), [wbptr] "+r" (weight_bias_ptr) - : [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x25, %[inptr0], 
%[input_row_stride]\n" - "add x16, %[input_col_stride1], %[input_col_stride1]\n" - "add x21, %[outptr0], %[output_row_stride]\n" - "add x22, x25, %[input_row_stride]\n" - "add x23, x16, #64\n" - "add x26, x16, %[input_col_stride1]\n" - "add x13, x22, %[input_row_stride]\n" - "add x20, x26, #64\n" - "add x9, x26, %[input_col_stride1]\n" - "add x24, x13, %[input_row_stride]\n" - "add x15, x9, #64\n" - "add x14, x21, %[output_row_stride]\n" - "add x19, %[output_col_stride1], %[output_col_stride1]\n" - "and x27, %[n_channels], #3\n" - "lsr x28, %[n_channels], #2\n" - "cbz x28, 4f\n" - "1:\n" - "ldr q20, [%[wbptr]]\n" - "subs x28, x28, #1\n" - "mov v4.16b, v20.16b\n" - "ldr q15, [%[wbptr], #16]\n" - "mov v1.16b, v20.16b\n" - "ldr q0, [%[wbptr], #32]\n" - "mov v3.16b, v20.16b\n" - "ldr q13, [%[wbptr], #48]\n" - "mov v7.16b, v20.16b\n" - "ldr q16, [%[wbptr], #64]\n" - "mov v9.16b, v20.16b\n" - "ldr q12, [%[wbptr], #80]\n" - "mov v2.16b, v20.16b\n" - "ldr q17, [%[wbptr], #96]\n" - "mov v6.16b, v20.16b\n" - "ldr q11, [%[wbptr], #112]\n" - "mov v8.16b, v20.16b\n" - "ldr q10, [%[wbptr], #128]\n" - "mov v5.16b, v20.16b\n" - "ldr q14, [%[wbptr], #144]\n" - "ldr q27, [%[inptr0]]\n" - "ldr q24, [x25]\n" - "fmla v4.4s, v27.4s, v15.4s\n" - "ldr q22, [%[inptr0], %[input_col_stride1]]\n" - "ldr q21, [x22]\n" - "ldr q19, [x25, %[input_col_stride1]]\n" - "ldr q31, [%[inptr0], x16]\n" - "ldr q28, [x13]\n" - "fmla v4.4s, v24.4s, v16.4s\n" - "ldr q18, [x22, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x25, #64]\n" - "prfm pldl1keep, [%[inptr0], x17]\n" - "prfm pldl1keep, [x22, #64]\n" - "prfm pldl1keep, [x25, x17]\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "prfm pldl1keep, [x13, #64]\n" - "prfm pldl1keep, [x22, x17]\n" - "beq 3f\n" - "2:\n" - "fmla v1.4s, v24.4s, v15.4s\n" - "ldr q24, [x25, x16]\n" - "fmla v4.4s, v22.4s, v0.4s\n" - "ldr q29, [%[inptr0], x26]\n" - "fmla v3.4s, v22.4s, v15.4s\n" - "ldr q30, [x24]\n" - "fmla v1.4s, v21.4s, v16.4s\n" - 
"ldr q25, [x13, %[input_col_stride1]]\n" - "fmla v4.4s, v21.4s, v11.4s\n" - "prfm pldl1keep, [x25, x23]\n" - "fmla v7.4s, v21.4s, v15.4s\n" - "ldr q26, [x22, x16]\n" - "fmla v1.4s, v19.4s, v0.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v4.4s, v19.4s, v12.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v3.4s, v19.4s, v16.4s\n" - "prfm pldl1keep, [x13, x17]\n" - "fmla v9.4s, v19.4s, v15.4s\n" - "ldr q23, [x25, x26]\n" - "fmla v4.4s, v31.4s, v13.4s\n" - "prfm pldl1keep, [x22, x23]\n" - "fmla v3.4s, v31.4s, v0.4s\n" - "prfm pldl1keep, [x25, x20]\n" - "fmla v2.4s, v31.4s, v15.4s\n" - "ldr q20, [%[inptr0], x9]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "ldr q28, [x24, %[input_col_stride1]]\n" - "fmla v4.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x24, x17]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "prfm pldl1keep, [x13, x23]\n" - "fmla v3.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x22, x20]\n" - "fmla v7.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [x25, x15]\n" - "fmla v9.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x24, x23]\n" - "fmla v6.4s, v18.4s, v15.4s\n" - "ldr q27, [x13, x16]\n" - "fmla v4.4s, v24.4s, v17.4s\n" - "prfm pldl1keep, [x13, x20]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x22, x15]\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "prfm pldl1keep, [x24, x20]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "prfm pldl1keep, [x13, x15]\n" - "fmla v2.4s, v24.4s, v16.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v8.4s, v24.4s, v15.4s\n" - "ldr q24, [x22, x26]\n" - "fmla v3.4s, v29.4s, v13.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v2.4s, v29.4s, v0.4s\n" - "ldr q22, [x25, x9]\n" - "fmla v7.4s, v30.4s, v11.4s\n" - "ldr q21, [x24, x16]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v9.4s, v25.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v7.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v6.4s, v25.4s, v16.4s\n" - "ldr q19, [x13, 
x26]\n" - "fmla v4.4s, v26.4s, v14.4s\n" - "prfm pldl1keep, [%[inptr0], x17]\n" - "fmla v1.4s, v26.4s, v17.4s\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "fmla v3.4s, v26.4s, v10.4s\n" - "add x25, x25, #16\n" - "fmla v7.4s, v26.4s, v13.4s\n" - "prfm pldl1keep, [x25, #64]\n" - "fmla v9.4s, v26.4s, v12.4s\n" - "prfm pldl1keep, [x25, x17]\n" - "fmla v2.4s, v26.4s, v11.4s\n" - "subs x28, x28, #1\n" - "fmla v6.4s, v26.4s, v0.4s\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "fmla v5.4s, v26.4s, v15.4s\n" - "ldr q26, [x22, x9]\n" - "fmla v3.4s, v23.4s, v17.4s\n" - "ldr q18, [x24, x26]\n" - "fmla v9.4s, v23.4s, v13.4s\n" - "add x22, x22, #16\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v8.4s, v23.4s, v0.4s\n" - "ldr q23, [x13, x9]\n" - "fmla v7.4s, v28.4s, v10.4s\n" - "prfm pldl1keep, [x22, x17]\n" - "fmla v2.4s, v20.4s, v13.4s\n" - "ldr q25, [x24, x9]\n" - "fmla v6.4s, v28.4s, v11.4s\n" - "ldr q20, [%[wbptr]]\n" - "fmla v1.4s, v27.4s, v14.4s\n" - "add x13, x13, #16\n" - "fmla v7.4s, v27.4s, v17.4s\n" - "prfm pldl1keep, [x13, #64]\n" - "fmla v9.4s, v27.4s, v10.4s\n" - "add x24, x24, #16\n" - "fmla v6.4s, v27.4s, v12.4s\n" - "fmla v8.4s, v27.4s, v11.4s\n" - "fmla v5.4s, v27.4s, v16.4s\n" - "ldr q15, [%[wbptr], #16]\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "ldr q27, [%[inptr0]]\n" - "fmla v9.4s, v24.4s, v17.4s\n" - "fmla v2.4s, v24.4s, v10.4s\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "fmla v8.4s, v24.4s, v12.4s\n" - "fmla v5.4s, v24.4s, v0.4s\n" - "ldr q16, [%[wbptr], #64]\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "ldr q24, [x25]\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "ldr q22, [%[inptr0], %[input_col_stride1]]\n" - "fmla v7.4s, v21.4s, v14.4s\n" - "fmla v6.4s, v21.4s, v10.4s\n" - "fmla v5.4s, v21.4s, v11.4s\n" - "ldr q0, [%[wbptr], #32]\n" - "fmla v9.4s, v19.4s, v14.4s\n" - "ldr q21, [x22]\n" - "fmla v6.4s, v19.4s, v17.4s\n" - "fmla v8.4s, v19.4s, v10.4s\n" - "fmla v5.4s, v19.4s, v12.4s\n" - "ldr q11, [%[wbptr], #112]\n" - "fmla v2.4s, v26.4s, v14.4s\n" - "movi 
v29.16b, #0\n" - "fmla v8.4s, v26.4s, v17.4s\n" - "fmla v6.4s, v18.4s, v14.4s\n" - "fmla v5.4s, v26.4s, v13.4s\n" - "ldr q12, [%[wbptr], #80]\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "ldr q19, [x25, %[input_col_stride1]]\n" - "fmla v8.4s, v23.4s, v14.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "str q4, [%[outptr0]]\n" - "fmla v5.4s, v18.4s, v10.4s\n" - "str q3, [%[outptr0], %[output_col_stride1]]\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "ldr q13, [%[wbptr], #48]\n" - "str q2, [%[outptr0], x19]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "str q1, [x21]\n" - "fmax v9.4s, v9.4s, v29.4s\n" - "fmax v8.4s, v8.4s, v29.4s\n" - "ldr q10, [%[wbptr], #128]\n" - "str q9, [x21, %[output_col_stride1]]\n" - "fmla v5.4s, v25.4s, v14.4s\n" - "str q8, [x21, x19]\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "ldr q17, [%[wbptr], #96]\n" - "str q7, [x14]\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "str q6, [x14, %[output_col_stride1]]\n" - "mov v4.16b, v20.16b\n" - "str q5, [x14, x19]\n" - "mov v1.16b, v20.16b\n" - "mov v3.16b, v20.16b\n" - "ldr q14, [%[wbptr], #144]\n" - "mov v7.16b, v20.16b\n" - "ldr q31, [%[inptr0], x16]\n" - "mov v9.16b, v20.16b\n" - "ldr q28, [x13]\n" - "mov v2.16b, v20.16b\n" - "ldr q18, [x22, %[input_col_stride1]]\n" - "mov v6.16b, v20.16b\n" - "add %[outptr0], %[outptr0], #16\n" - "mov v8.16b, v20.16b\n" - "add x21, x21, #16\n" - "mov v5.16b, v20.16b\n" - "add x14, x14, #16\n" - "fmla v4.4s, v27.4s, v15.4s\n" - "fmla v4.4s, v24.4s, v16.4s\n" - "bne 2b\n" - "3:\n" - "fmla v1.4s, v24.4s, v15.4s\n" - "ldr q24, [x25, x16]\n" - "fmla v4.4s, v22.4s, v0.4s\n" - "ldr q29, [%[inptr0], x26]\n" - "fmla v3.4s, v22.4s, v15.4s\n" - "ldr q30, [x24]\n" - "fmla v1.4s, v21.4s, v16.4s\n" - "ldr q25, [x13, %[input_col_stride1]]\n" - "fmla v4.4s, v21.4s, v11.4s\n" - "prfm pldl1keep, [x25, x23]\n" - "fmla v7.4s, v21.4s, v15.4s\n" - "ldr q26, [x22, x16]\n" - "fmla v1.4s, v19.4s, v0.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v4.4s, v19.4s, 
v12.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v3.4s, v19.4s, v16.4s\n" - "prfm pldl1keep, [x13, x17]\n" - "fmla v9.4s, v19.4s, v15.4s\n" - "ldr q23, [x25, x26]\n" - "fmla v4.4s, v31.4s, v13.4s\n" - "prfm pldl1keep, [x22, x23]\n" - "fmla v3.4s, v31.4s, v0.4s\n" - "prfm pldl1keep, [x25, x20]\n" - "fmla v2.4s, v31.4s, v15.4s\n" - "ldr q20, [%[inptr0], x9]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "ldr q28, [x24, %[input_col_stride1]]\n" - "fmla v4.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x24, x17]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "prfm pldl1keep, [x13, x23]\n" - "fmla v3.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x22, x20]\n" - "fmla v7.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [x25, x15]\n" - "fmla v9.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x24, x23]\n" - "fmla v6.4s, v18.4s, v15.4s\n" - "ldr q27, [x13, x16]\n" - "fmla v4.4s, v24.4s, v17.4s\n" - "prfm pldl1keep, [x13, x20]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x22, x15]\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "prfm pldl1keep, [x24, x20]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "prfm pldl1keep, [x13, x15]\n" - "fmla v2.4s, v24.4s, v16.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v8.4s, v24.4s, v15.4s\n" - "ldr q24, [x22, x26]\n" - "fmla v3.4s, v29.4s, v13.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v2.4s, v29.4s, v0.4s\n" - "ldr q22, [x25, x9]\n" - "fmla v7.4s, v30.4s, v11.4s\n" - "ldr q21, [x24, x16]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v9.4s, v25.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v7.4s, v25.4s, v12.4s\n" - "add x25, x25, #16\n" - "fmla v6.4s, v25.4s, v16.4s\n" - "ldr q19, [x13, x26]\n" - "fmla v4.4s, v26.4s, v14.4s\n" - "fmla v1.4s, v26.4s, v17.4s\n" - "fmla v3.4s, v26.4s, v10.4s\n" - "fmla v7.4s, v26.4s, v13.4s\n" - "fmla v9.4s, v26.4s, v12.4s\n" - "fmla v2.4s, v26.4s, v11.4s\n" - "fmla v6.4s, v26.4s, v0.4s\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "fmla 
v5.4s, v26.4s, v15.4s\n" - "ldr q26, [x22, x9]\n" - "fmla v3.4s, v23.4s, v17.4s\n" - "ldr q18, [x24, x26]\n" - "fmla v9.4s, v23.4s, v13.4s\n" - "add x22, x22, #16\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "fmla v8.4s, v23.4s, v0.4s\n" - "fmla v7.4s, v28.4s, v10.4s\n" - "ldr q23, [x13, x9]\n" - "fmla v6.4s, v28.4s, v11.4s\n" - "ldr q25, [x24, x9]\n" - "fmla v2.4s, v20.4s, v13.4s\n" - "add x13, x13, #16\n" - "fmla v1.4s, v27.4s, v14.4s\n" - "add x24, x24, #16\n" - "fmla v7.4s, v27.4s, v17.4s\n" - "fmla v9.4s, v27.4s, v10.4s\n" - "fmla v6.4s, v27.4s, v12.4s\n" - "fmla v8.4s, v27.4s, v11.4s\n" - "fmla v5.4s, v27.4s, v16.4s\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "fmla v9.4s, v24.4s, v17.4s\n" - "fmla v2.4s, v24.4s, v10.4s\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "fmla v8.4s, v24.4s, v12.4s\n" - "fmla v5.4s, v24.4s, v0.4s\n" - "fmla v7.4s, v21.4s, v14.4s\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "fmla v9.4s, v19.4s, v14.4s\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "fmla v6.4s, v21.4s, v10.4s\n" - "fmla v5.4s, v21.4s, v11.4s\n" - "movi v29.16b, #0\n" - "fmla v2.4s, v26.4s, v14.4s\n" - "fmla v6.4s, v19.4s, v17.4s\n" - "fmla v8.4s, v19.4s, v10.4s\n" - "fmla v5.4s, v19.4s, v12.4s\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "fmla v6.4s, v18.4s, v14.4s\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "str q4, [%[outptr0]]\n" - "fmla v8.4s, v26.4s, v17.4s\n" - "str q3, [%[outptr0], %[output_col_stride1]]\n" - "fmla v5.4s, v26.4s, v13.4s\n" - "str q2, [%[outptr0], x19]\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "fmla v8.4s, v23.4s, v14.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "str q1, [x21]\n" - "fmla v5.4s, v18.4s, v10.4s\n" - "fmax v9.4s, v9.4s, v29.4s\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmax v8.4s, v8.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "str q9, [x21, %[output_col_stride1]]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "str q8, [x21, x19]\n" - "str q7, [x14]\n" - "str q6, [x14, %[output_col_stride1]]\n" - "add x21, x21, #16\n" - "fmla v5.4s, v25.4s, v14.4s\n" - "fmax v5.4s, 
v5.4s, v29.4s\n" - "str q5, [x14, x19]\n" - "add x14, x14, #16\n" - "4:\n" - "cbz x27, 7f\n" - "ldr s20, [%[wbptr]]\n" - "mov v4.16b, v20.16b\n" - "ldr s15, [%[wbptr], #4]\n" - "mov v1.16b, v20.16b\n" - "ldr s0, [%[wbptr], #8]\n" - "mov v3.16b, v20.16b\n" - "ldr s13, [%[wbptr], #12]\n" - "mov v7.16b, v20.16b\n" - "ldr s16, [%[wbptr], #16]\n" - "mov v9.16b, v20.16b\n" - "ldr s12, [%[wbptr], #20]\n" - "mov v2.16b, v20.16b\n" - "ldr s17, [%[wbptr], #24]\n" - "mov v6.16b, v20.16b\n" - "ldr s11, [%[wbptr], #28]\n" - "mov v8.16b, v20.16b\n" - "ldr s10, [%[wbptr], #32]\n" - "mov v5.16b, v20.16b\n" - "ldr s14, [%[wbptr], #36]\n" - "ldr s27, [%[inptr0]]\n" - "subs x27, x27, #1\n" - "fmla v4.4s, v27.4s, v15.4s\n" - "ldr s24, [x25]\n" - "ldr s22, [%[inptr0], %[input_col_stride1]]\n" - "ldr s21, [x22]\n" - "ldr s19, [x25, %[input_col_stride1]]\n" - "ldr s31, [%[inptr0], x16]\n" - "fmla v4.4s, v24.4s, v16.4s\n" - "ldr s28, [x13]\n" - "ldr s18, [x22, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x25, #64]\n" - "prfm pldl1keep, [%[inptr0], x17]\n" - "prfm pldl1keep, [x22, #64]\n" - "prfm pldl1keep, [x25, x17]\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "prfm pldl1keep, [x13, #64]\n" - "prfm pldl1keep, [x22, x17]\n" - "beq 6f\n" - "5:\n" - "fmla v1.4s, v24.4s, v15.4s\n" - "ldr s24, [x25, x16]\n" - "fmla v4.4s, v22.4s, v0.4s\n" - "ldr s29, [%[inptr0], x26]\n" - "fmla v3.4s, v22.4s, v15.4s\n" - "ldr s30, [x24]\n" - "fmla v1.4s, v21.4s, v16.4s\n" - "ldr s25, [x13, %[input_col_stride1]]\n" - "fmla v4.4s, v21.4s, v11.4s\n" - "prfm pldl1keep, [x25, x23]\n" - "fmla v7.4s, v21.4s, v15.4s\n" - "ldr s26, [x22, x16]\n" - "fmla v1.4s, v19.4s, v0.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v4.4s, v19.4s, v12.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v3.4s, v19.4s, v16.4s\n" - "prfm pldl1keep, [x13, x17]\n" - "fmla v9.4s, v19.4s, v15.4s\n" - "ldr s23, [x25, x26]\n" - "fmla v4.4s, v31.4s, v13.4s\n" - "prfm pldl1keep, [x22, x23]\n" - "fmla 
v3.4s, v31.4s, v0.4s\n" - "prfm pldl1keep, [x25, x20]\n" - "fmla v2.4s, v31.4s, v15.4s\n" - "ldr s20, [%[inptr0], x9]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "ldr s28, [x24, %[input_col_stride1]]\n" - "fmla v4.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x24, x17]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "prfm pldl1keep, [x13, x23]\n" - "fmla v3.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x22, x20]\n" - "fmla v7.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [x25, x15]\n" - "fmla v9.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x24, x23]\n" - "fmla v6.4s, v18.4s, v15.4s\n" - "ldr s27, [x13, x16]\n" - "fmla v4.4s, v24.4s, v17.4s\n" - "prfm pldl1keep, [x13, x20]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x22, x15]\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "prfm pldl1keep, [x24, x20]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "prfm pldl1keep, [x13, x15]\n" - "fmla v2.4s, v24.4s, v16.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v8.4s, v24.4s, v15.4s\n" - "ldr s24, [x22, x26]\n" - "fmla v3.4s, v29.4s, v13.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v2.4s, v29.4s, v0.4s\n" - "ldr s22, [x25, x9]\n" - "fmla v7.4s, v30.4s, v11.4s\n" - "ldr s21, [x24, x16]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v9.4s, v25.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v7.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v6.4s, v25.4s, v16.4s\n" - "ldr s19, [x13, x26]\n" - "fmla v4.4s, v26.4s, v14.4s\n" - "prfm pldl1keep, [%[inptr0], x17]\n" - "fmla v1.4s, v26.4s, v17.4s\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "fmla v3.4s, v26.4s, v10.4s\n" - "add x25, x25, #4\n" - "fmla v7.4s, v26.4s, v13.4s\n" - "prfm pldl1keep, [x25, #64]\n" - "fmla v9.4s, v26.4s, v12.4s\n" - "prfm pldl1keep, [x25, x17]\n" - "fmla v2.4s, v26.4s, v11.4s\n" - "subs x27, x27, #1\n" - "fmla v6.4s, v26.4s, v0.4s\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "fmla v5.4s, v26.4s, v15.4s\n" - "ldr s26, 
[x22, x9]\n" - "fmla v3.4s, v23.4s, v17.4s\n" - "ldr s18, [x24, x26]\n" - "fmla v9.4s, v23.4s, v13.4s\n" - "add x22, x22, #4\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v8.4s, v23.4s, v0.4s\n" - "ldr s23, [x13, x9]\n" - "fmla v7.4s, v28.4s, v10.4s\n" - "prfm pldl1keep, [x22, x17]\n" - "fmla v2.4s, v20.4s, v13.4s\n" - "ldr s25, [x24, x9]\n" - "fmla v6.4s, v28.4s, v11.4s\n" - "ldr s20, [%[wbptr]]\n" - "fmla v1.4s, v27.4s, v14.4s\n" - "add x13, x13, #4\n" - "fmla v7.4s, v27.4s, v17.4s\n" - "prfm pldl1keep, [x13, #64]\n" - "fmla v9.4s, v27.4s, v10.4s\n" - "add x24, x24, #4\n" - "fmla v6.4s, v27.4s, v12.4s\n" - "fmla v8.4s, v27.4s, v11.4s\n" - "fmla v5.4s, v27.4s, v16.4s\n" - "ldr s15, [%[wbptr], #4]\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "ldr s27, [%[inptr0]]\n" - "fmla v9.4s, v24.4s, v17.4s\n" - "fmla v2.4s, v24.4s, v10.4s\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "fmla v8.4s, v24.4s, v12.4s\n" - "fmla v5.4s, v24.4s, v0.4s\n" - "ldr s16, [%[wbptr], #16]\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "ldr s24, [x25]\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "ldr s22, [%[inptr0], %[input_col_stride1]]\n" - "fmla v7.4s, v21.4s, v14.4s\n" - "fmla v6.4s, v21.4s, v10.4s\n" - "fmla v5.4s, v21.4s, v11.4s\n" - "ldr s0, [%[wbptr], #8]\n" - "fmla v9.4s, v19.4s, v14.4s\n" - "ldr s21, [x22]\n" - "fmla v6.4s, v19.4s, v17.4s\n" - "fmla v8.4s, v19.4s, v10.4s\n" - "fmla v5.4s, v19.4s, v12.4s\n" - "ldr s11, [%[wbptr], #28]\n" - "fmla v2.4s, v26.4s, v14.4s\n" - "movi v29.16b, #0\n" - "fmla v8.4s, v26.4s, v17.4s\n" - "fmla v6.4s, v18.4s, v14.4s\n" - "fmla v5.4s, v26.4s, v13.4s\n" - "ldr s12, [%[wbptr], #20]\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "ldr s19, [x25, %[input_col_stride1]]\n" - "fmla v8.4s, v23.4s, v14.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "str s4, [%[outptr0]]\n" - "fmla v5.4s, v18.4s, v10.4s\n" - "str s3, [%[outptr0], %[output_col_stride1]]\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "ldr s13, [%[wbptr], #12]\n" - "str s2, [%[outptr0], 
x19]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "str s1, [x21]\n" - "fmax v9.4s, v9.4s, v29.4s\n" - "fmax v8.4s, v8.4s, v29.4s\n" - "ldr s10, [%[wbptr], #32]\n" - "str s9, [x21, %[output_col_stride1]]\n" - "fmla v5.4s, v25.4s, v14.4s\n" - "str s8, [x21, x19]\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "ldr s17, [%[wbptr], #24]\n" - "str s7, [x14]\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "str s6, [x14, %[output_col_stride1]]\n" - "mov v4.16b, v20.16b\n" - "str s5, [x14, x19]\n" - "mov v1.16b, v20.16b\n" - "mov v3.16b, v20.16b\n" - "ldr s14, [%[wbptr], #36]\n" - "mov v7.16b, v20.16b\n" - "ldr s31, [%[inptr0], x16]\n" - "mov v9.16b, v20.16b\n" - "ldr s28, [x13]\n" - "mov v2.16b, v20.16b\n" - "ldr s18, [x22, %[input_col_stride1]]\n" - "mov v6.16b, v20.16b\n" - "add %[outptr0], %[outptr0], #4\n" - "mov v8.16b, v20.16b\n" - "add x21, x21, #4\n" - "mov v5.16b, v20.16b\n" - "add x14, x14, #4\n" - "fmla v4.4s, v27.4s, v15.4s\n" - "fmla v4.4s, v24.4s, v16.4s\n" - "bne 5b\n" - "6:\n" - "fmla v1.4s, v24.4s, v15.4s\n" - "ldr s24, [x25, x16]\n" - "fmla v4.4s, v22.4s, v0.4s\n" - "ldr s29, [%[inptr0], x26]\n" - "fmla v3.4s, v22.4s, v15.4s\n" - "ldr s30, [x24]\n" - "fmla v1.4s, v21.4s, v16.4s\n" - "ldr s25, [x13, %[input_col_stride1]]\n" - "fmla v4.4s, v21.4s, v11.4s\n" - "prfm pldl1keep, [x25, x23]\n" - "fmla v7.4s, v21.4s, v15.4s\n" - "ldr s26, [x22, x16]\n" - "fmla v1.4s, v19.4s, v0.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v4.4s, v19.4s, v12.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v3.4s, v19.4s, v16.4s\n" - "prfm pldl1keep, [x13, x17]\n" - "fmla v9.4s, v19.4s, v15.4s\n" - "ldr s23, [x25, x26]\n" - "fmla v4.4s, v31.4s, v13.4s\n" - "prfm pldl1keep, [x22, x23]\n" - "fmla v3.4s, v31.4s, v0.4s\n" - "prfm pldl1keep, [x25, x20]\n" - "fmla v2.4s, v31.4s, v15.4s\n" - "ldr s20, [%[inptr0], x9]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "ldr s28, [x24, %[input_col_stride1]]\n" - "fmla 
v4.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x24, x17]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "prfm pldl1keep, [x13, x23]\n" - "fmla v3.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x22, x20]\n" - "fmla v7.4s, v18.4s, v0.4s\n" - "prfm pldl1keep, [x25, x15]\n" - "fmla v9.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x24, x23]\n" - "fmla v6.4s, v18.4s, v15.4s\n" - "ldr s27, [x13, x16]\n" - "fmla v4.4s, v24.4s, v17.4s\n" - "prfm pldl1keep, [x13, x20]\n" - "fmla v1.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x22, x15]\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "prfm pldl1keep, [x24, x20]\n" - "fmla v9.4s, v24.4s, v0.4s\n" - "prfm pldl1keep, [x13, x15]\n" - "fmla v2.4s, v24.4s, v16.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v8.4s, v24.4s, v15.4s\n" - "ldr s24, [x22, x26]\n" - "fmla v3.4s, v29.4s, v13.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v2.4s, v29.4s, v0.4s\n" - "ldr s22, [x25, x9]\n" - "fmla v7.4s, v30.4s, v11.4s\n" - "ldr s21, [x24, x16]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v9.4s, v25.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v7.4s, v25.4s, v12.4s\n" - "add x25, x25, #4\n" - "fmla v6.4s, v25.4s, v16.4s\n" - "ldr s19, [x13, x26]\n" - "fmla v4.4s, v26.4s, v14.4s\n" - "fmla v1.4s, v26.4s, v17.4s\n" - "fmla v3.4s, v26.4s, v10.4s\n" - "fmla v7.4s, v26.4s, v13.4s\n" - "fmla v9.4s, v26.4s, v12.4s\n" - "fmla v2.4s, v26.4s, v11.4s\n" - "fmla v6.4s, v26.4s, v0.4s\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "fmla v5.4s, v26.4s, v15.4s\n" - "ldr s26, [x22, x9]\n" - "fmla v3.4s, v23.4s, v17.4s\n" - "ldr s18, [x24, x26]\n" - "fmla v9.4s, v23.4s, v13.4s\n" - "add x22, x22, #4\n" - "fmla v2.4s, v23.4s, v12.4s\n" - "fmla v8.4s, v23.4s, v0.4s\n" - "fmla v7.4s, v28.4s, v10.4s\n" - "ldr s23, [x13, x9]\n" - "fmla v6.4s, v28.4s, v11.4s\n" - "ldr s25, [x24, x9]\n" - "fmla v2.4s, v20.4s, v13.4s\n" - "add x13, x13, #4\n" - "fmla v1.4s, v27.4s, v14.4s\n" - "add x24, x24, #4\n" - "fmla v7.4s, v27.4s, v17.4s\n" - "fmla v9.4s, v27.4s, v10.4s\n" 
- "fmla v6.4s, v27.4s, v12.4s\n" - "fmla v8.4s, v27.4s, v11.4s\n" - "fmla v5.4s, v27.4s, v16.4s\n" - "fmla v3.4s, v24.4s, v14.4s\n" - "fmla v9.4s, v24.4s, v17.4s\n" - "fmla v2.4s, v24.4s, v10.4s\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "fmla v8.4s, v24.4s, v12.4s\n" - "fmla v5.4s, v24.4s, v0.4s\n" - "fmla v7.4s, v21.4s, v14.4s\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "fmla v9.4s, v19.4s, v14.4s\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "fmla v6.4s, v21.4s, v10.4s\n" - "fmla v5.4s, v21.4s, v11.4s\n" - "movi v29.16b, #0\n" - "fmla v2.4s, v26.4s, v14.4s\n" - "fmla v6.4s, v19.4s, v17.4s\n" - "fmla v8.4s, v19.4s, v10.4s\n" - "fmla v5.4s, v19.4s, v12.4s\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "fmla v6.4s, v18.4s, v14.4s\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "str s4, [%[outptr0]]\n" - "fmla v8.4s, v26.4s, v17.4s\n" - "str s3, [%[outptr0], %[output_col_stride1]]\n" - "fmla v5.4s, v26.4s, v13.4s\n" - "str s2, [%[outptr0], x19]\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "fmla v8.4s, v23.4s, v14.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "str s1, [x21]\n" - "fmla v5.4s, v18.4s, v10.4s\n" - "fmax v9.4s, v9.4s, v29.4s\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmax v8.4s, v8.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "str s9, [x21, %[output_col_stride1]]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "str s8, [x21, x19]\n" - "str s7, [x14]\n" - "str s6, [x14, %[output_col_stride1]]\n" - "add x21, x21, #4\n" - "fmla v5.4s, v25.4s, v14.4s\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "str s5, [x14, x19]\n" - "add x14, x14, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input) - : [output_row_stride] "r" (output_row_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", 
"v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU6>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x17, %[inptr0], %[input_row_stride]\n" - "add x9, %[input_col_stride1], %[input_col_stride1]\n" - "add x25, %[outptr0], %[output_row_stride]\n" - "add x14, x17, %[input_row_stride]\n" - "add x22, x9, #64\n" - "add x15, x9, %[input_col_stride1]\n" - "add x21, x14, %[input_row_stride]\n" - "add x16, x15, #64\n" - "add x24, x15, %[input_col_stride1]\n" - "add x26, x21, %[input_row_stride]\n" - "add x23, x24, #64\n" - "add x13, x25, %[output_row_stride]\n" - "add x27, %[output_col_stride1], %[output_col_stride1]\n" - "and x19, %[n_channels], #3\n" - "lsr x20, %[n_channels], #2\n" - "cbz x20, 4f\n" - "1:\n" - "ldr q19, [%[wbptr]]\n" - "subs x20, x20, #1\n" - "mov v8.16b, v19.16b\n" - "ldr q17, [%[wbptr], #16]\n" - "mov v5.16b, v19.16b\n" - "ldr q16, [%[wbptr], #32]\n" - "mov v7.16b, v19.16b\n" - "ldr q15, [%[wbptr], #48]\n" - "mov v2.16b, v19.16b\n" - "ldr q14, [%[wbptr], #64]\n" - "mov v4.16b, v19.16b\n" - "ldr q13, [%[wbptr], #80]\n" - "mov v6.16b, v19.16b\n" - "ldr q12, [%[wbptr], #96]\n" - "mov v1.16b, v19.16b\n" - "ldr q11, [%[wbptr], #112]\n" - "mov v3.16b, v19.16b\n" - "ldr q10, [%[wbptr], #128]\n" - "mov v0.16b, v19.16b\n" - "ldr q9, [%[wbptr], #144]\n" - "ldr q25, [%[inptr0]]\n" - "ldr q27, [x17]\n" - "fmla v8.4s, v25.4s, v17.4s\n" - "ldr q26, [%[inptr0], %[input_col_stride1]]\n" - "ldr q20, [x14]\n" - "ldr q22, [x17, %[input_col_stride1]]\n" - 
"ldr q28, [%[inptr0], x9]\n" - "ldr q23, [x21]\n" - "fmla v8.4s, v27.4s, v14.4s\n" - "ldr q18, [x14, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x17, #64]\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "prfm pldl1keep, [x14, #64]\n" - "prfm pldl1keep, [x17, x28]\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "prfm pldl1keep, [x21, #64]\n" - "prfm pldl1keep, [x14, x28]\n" - "beq 3f\n" - "2:\n" - "fmla v5.4s, v27.4s, v17.4s\n" - "ldr q27, [x17, x9]\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "ldr q30, [%[inptr0], x15]\n" - "fmla v7.4s, v26.4s, v17.4s\n" - "ldr q31, [x26]\n" - "fmla v5.4s, v20.4s, v14.4s\n" - "ldr q24, [x21, %[input_col_stride1]]\n" - "fmla v8.4s, v20.4s, v11.4s\n" - "prfm pldl1keep, [x17, x22]\n" - "fmla v2.4s, v20.4s, v17.4s\n" - "ldr q29, [x14, x9]\n" - "fmla v5.4s, v22.4s, v16.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x26, #64]\n" - "fmla v7.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x21, x28]\n" - "fmla v4.4s, v22.4s, v17.4s\n" - "ldr q21, [x17, x15]\n" - "fmla v8.4s, v28.4s, v15.4s\n" - "prfm pldl1keep, [x14, x22]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "prfm pldl1keep, [x17, x16]\n" - "fmla v6.4s, v28.4s, v17.4s\n" - "ldr q19, [%[inptr0], x24]\n" - "fmla v5.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "ldr q28, [x26, %[input_col_stride1]]\n" - "fmla v8.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x26, x28]\n" - "fmla v5.4s, v18.4s, v13.4s\n" - "prfm pldl1keep, [x21, x22]\n" - "fmla v7.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x14, x16]\n" - "fmla v2.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x17, x23]\n" - "fmla v4.4s, v18.4s, v14.4s\n" - "prfm pldl1keep, [x26, x22]\n" - "fmla v1.4s, v18.4s, v17.4s\n" - "ldr q25, [x21, x9]\n" - "fmla v8.4s, v27.4s, v12.4s\n" - "prfm pldl1keep, [x21, x16]\n" - "fmla v5.4s, v27.4s, v15.4s\n" - "prfm pldl1keep, [x14, x23]\n" - "fmla v7.4s, v27.4s, v13.4s\n" - "prfm pldl1keep, [x26, 
x16]\n" - "fmla v4.4s, v27.4s, v16.4s\n" - "prfm pldl1keep, [x21, x23]\n" - "fmla v6.4s, v27.4s, v14.4s\n" - "prfm pldl1keep, [x26, x23]\n" - "fmla v3.4s, v27.4s, v17.4s\n" - "ldr q27, [x14, x15]\n" - "fmla v7.4s, v30.4s, v15.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v6.4s, v30.4s, v16.4s\n" - "ldr q26, [x17, x24]\n" - "fmla v2.4s, v31.4s, v11.4s\n" - "ldr q20, [x26, x9]\n" - "fmla v5.4s, v24.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v4.4s, v24.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v2.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v1.4s, v24.4s, v14.4s\n" - "ldr q18, [x21, x15]\n" - "fmla v8.4s, v29.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "fmla v5.4s, v29.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "fmla v7.4s, v29.4s, v10.4s\n" - "add x17, x17, #16\n" - "fmla v2.4s, v29.4s, v15.4s\n" - "prfm pldl1keep, [x17, #64]\n" - "fmla v4.4s, v29.4s, v13.4s\n" - "prfm pldl1keep, [x17, x28]\n" - "fmla v6.4s, v29.4s, v11.4s\n" - "subs x20, x20, #1\n" - "fmla v1.4s, v29.4s, v16.4s\n" - "fmla v3.4s, v29.4s, v14.4s\n" - "fmla v0.4s, v29.4s, v17.4s\n" - "ldr q22, [x14, x24]\n" - "fmla v7.4s, v21.4s, v12.4s\n" - "ldr q23, [x26, x15]\n" - "fmla v4.4s, v21.4s, v15.4s\n" - "add x14, x14, #16\n" - "fmla v6.4s, v21.4s, v13.4s\n" - "prfm pldl1keep, [x14, #64]\n" - "fmla v3.4s, v21.4s, v16.4s\n" - "ldr q24, [x21, x24]\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "prfm pldl1keep, [x14, x28]\n" - "fmla v6.4s, v19.4s, v15.4s\n" - "ldr q21, [x26, x24]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "ldr q19, [%[wbptr]]\n" - "fmla v5.4s, v25.4s, v9.4s\n" - "add x21, x21, #16\n" - "fmla v2.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x21, #64]\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "add x26, x26, #16\n" - "fmla v1.4s, v25.4s, v13.4s\n" - "fmla v3.4s, v25.4s, v11.4s\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "ldr q17, [%[wbptr], #16]\n" - "fmla v7.4s, v27.4s, v9.4s\n" - "ldr q25, [%[inptr0]]\n" - "fmla v4.4s, v27.4s, v12.4s\n" - 
"fmla v6.4s, v27.4s, v10.4s\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "fmla v3.4s, v27.4s, v13.4s\n" - "fmla v0.4s, v27.4s, v16.4s\n" - "ldr q14, [%[wbptr], #64]\n" - "fmla v6.4s, v26.4s, v12.4s\n" - "ldr q27, [x17]\n" - "fmla v3.4s, v26.4s, v15.4s\n" - "ldr q26, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v20.4s, v9.4s\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "fmla v0.4s, v20.4s, v11.4s\n" - "ldr q16, [%[wbptr], #32]\n" - "fmla v4.4s, v18.4s, v9.4s\n" - "ldr q20, [x14]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "fmla v0.4s, v18.4s, v13.4s\n" - "ldr q11, [%[wbptr], #112]\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "movi v30.16b, #0\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "fmla v0.4s, v22.4s, v15.4s\n" - "ldr q13, [%[wbptr], #80]\n" - "fmov v29.4s, #6.0\n" - "fmax v8.4s, v8.4s, v30.4s\n" - "fmla v3.4s, v24.4s, v9.4s\n" - "fmax v7.4s, v7.4s, v30.4s\n" - "fmla v0.4s, v23.4s, v10.4s\n" - "ldr q15, [%[wbptr], #48]\n" - "fmin v8.4s, v8.4s, v29.4s\n" - "ldr q22, [x17, %[input_col_stride1]]\n" - "fmin v7.4s, v7.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v30.4s\n" - "str q8, [%[outptr0]]\n" - "fmla v0.4s, v24.4s, v12.4s\n" - "str q7, [%[outptr0], %[output_col_stride1]]\n" - "fmin v6.4s, v6.4s, v29.4s\n" - "fmax v5.4s, v5.4s, v30.4s\n" - "ldr q10, [%[wbptr], #128]\n" - "str q6, [%[outptr0], x27]\n" - "fmla v0.4s, v21.4s, v9.4s\n" - "fmin v5.4s, v5.4s, v29.4s\n" - "ldr q12, [%[wbptr], #96]\n" - "fmax v4.4s, v4.4s, v30.4s\n" - "ldr q28, [%[inptr0], x9]\n" - "str q5, [x25]\n" - "fmax v3.4s, v3.4s, v30.4s\n" - "fmin v4.4s, v4.4s, v29.4s\n" - "ldr q9, [%[wbptr], #144]\n" - "fmin v3.4s, v3.4s, v29.4s\n" - "ldr q23, [x21]\n" - "str q4, [x25, %[output_col_stride1]]\n" - "fmax v2.4s, v2.4s, v30.4s\n" - "str q3, [x25, x27]\n" - "fmax v1.4s, v1.4s, v30.4s\n" - "fmin v2.4s, v2.4s, v29.4s\n" - "ldr q18, [x14, %[input_col_stride1]]\n" - "fmin v1.4s, v1.4s, v29.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "str q2, [x13]\n" - "fmax v0.4s, v0.4s, 
v30.4s\n" - "str q1, [x13, %[output_col_stride1]]\n" - "mov v8.16b, v19.16b\n" - "fmin v0.4s, v0.4s, v29.4s\n" - "add x25, x25, #16\n" - "mov v5.16b, v19.16b\n" - "mov v7.16b, v19.16b\n" - "str q0, [x13, x27]\n" - "mov v2.16b, v19.16b\n" - "mov v4.16b, v19.16b\n" - "add x13, x13, #16\n" - "mov v6.16b, v19.16b\n" - "mov v1.16b, v19.16b\n" - "mov v3.16b, v19.16b\n" - "mov v0.16b, v19.16b\n" - "fmla v8.4s, v25.4s, v17.4s\n" - "fmla v8.4s, v27.4s, v14.4s\n" - "bne 2b\n" - "3:\n" - "fmla v5.4s, v27.4s, v17.4s\n" - "ldr q27, [x17, x9]\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "ldr q30, [%[inptr0], x15]\n" - "fmla v7.4s, v26.4s, v17.4s\n" - "ldr q31, [x26]\n" - "fmla v5.4s, v20.4s, v14.4s\n" - "ldr q24, [x21, %[input_col_stride1]]\n" - "fmla v8.4s, v20.4s, v11.4s\n" - "prfm pldl1keep, [x17, x22]\n" - "fmla v2.4s, v20.4s, v17.4s\n" - "ldr q29, [x14, x9]\n" - "fmla v5.4s, v22.4s, v16.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x26, #64]\n" - "fmla v7.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x21, x28]\n" - "fmla v4.4s, v22.4s, v17.4s\n" - "ldr q21, [x17, x15]\n" - "fmla v8.4s, v28.4s, v15.4s\n" - "prfm pldl1keep, [x14, x22]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "prfm pldl1keep, [x17, x16]\n" - "fmla v6.4s, v28.4s, v17.4s\n" - "ldr q19, [%[inptr0], x24]\n" - "fmla v5.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "ldr q28, [x26, %[input_col_stride1]]\n" - "fmla v8.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x26, x28]\n" - "fmla v5.4s, v18.4s, v13.4s\n" - "prfm pldl1keep, [x21, x22]\n" - "fmla v7.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x14, x16]\n" - "fmla v2.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x17, x23]\n" - "fmla v4.4s, v18.4s, v14.4s\n" - "prfm pldl1keep, [x26, x22]\n" - "fmla v1.4s, v18.4s, v17.4s\n" - "ldr q25, [x21, x9]\n" - "fmla v8.4s, v27.4s, v12.4s\n" - "prfm pldl1keep, [x21, x16]\n" - "fmla v5.4s, v27.4s, v15.4s\n" - "prfm pldl1keep, [x14, x23]\n" - "fmla v7.4s, 
v27.4s, v13.4s\n" - "prfm pldl1keep, [x26, x16]\n" - "fmla v4.4s, v27.4s, v16.4s\n" - "prfm pldl1keep, [x21, x23]\n" - "fmla v6.4s, v27.4s, v14.4s\n" - "prfm pldl1keep, [x26, x23]\n" - "fmla v3.4s, v27.4s, v17.4s\n" - "ldr q27, [x14, x15]\n" - "fmla v7.4s, v30.4s, v15.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v6.4s, v30.4s, v16.4s\n" - "ldr q26, [x17, x24]\n" - "fmla v2.4s, v31.4s, v11.4s\n" - "ldr q20, [x26, x9]\n" - "fmla v5.4s, v24.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v4.4s, v24.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v2.4s, v24.4s, v13.4s\n" - "add x17, x17, #16\n" - "fmla v1.4s, v24.4s, v14.4s\n" - "ldr q18, [x21, x15]\n" - "fmla v8.4s, v29.4s, v9.4s\n" - "fmla v5.4s, v29.4s, v12.4s\n" - "fmla v7.4s, v29.4s, v10.4s\n" - "fmla v2.4s, v29.4s, v15.4s\n" - "fmla v4.4s, v29.4s, v13.4s\n" - "fmla v6.4s, v29.4s, v11.4s\n" - "fmla v1.4s, v29.4s, v16.4s\n" - "fmla v3.4s, v29.4s, v14.4s\n" - "fmla v0.4s, v29.4s, v17.4s\n" - "ldr q22, [x14, x24]\n" - "fmla v7.4s, v21.4s, v12.4s\n" - "ldr q23, [x26, x15]\n" - "fmla v4.4s, v21.4s, v15.4s\n" - "add x14, x14, #16\n" - "fmla v6.4s, v21.4s, v13.4s\n" - "fmla v3.4s, v21.4s, v16.4s\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "ldr q24, [x21, x24]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "ldr q21, [x26, x24]\n" - "fmla v6.4s, v19.4s, v15.4s\n" - "add x21, x21, #16\n" - "fmla v5.4s, v25.4s, v9.4s\n" - "add x26, x26, #16\n" - "fmla v2.4s, v25.4s, v12.4s\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "fmla v1.4s, v25.4s, v13.4s\n" - "fmla v3.4s, v25.4s, v11.4s\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "fmla v7.4s, v27.4s, v9.4s\n" - "fmla v4.4s, v27.4s, v12.4s\n" - "fmla v6.4s, v27.4s, v10.4s\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "fmla v3.4s, v27.4s, v13.4s\n" - "fmla v0.4s, v27.4s, v16.4s\n" - "fmla v2.4s, v20.4s, v9.4s\n" - "fmla v6.4s, v26.4s, v12.4s\n" - "fmla v4.4s, v18.4s, v9.4s\n" - "fmla v3.4s, v26.4s, v15.4s\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "fmla v0.4s, v20.4s, v11.4s\n" - "movi v30.16b, #0\n" 
- "fmla v6.4s, v22.4s, v9.4s\n" - "fmov v29.4s, #6.0\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "fmla v0.4s, v18.4s, v13.4s\n" - "fmax v8.4s, v8.4s, v30.4s\n" - "fmax v7.4s, v7.4s, v30.4s\n" - "fmax v6.4s, v6.4s, v30.4s\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "fmla v0.4s, v22.4s, v15.4s\n" - "fmin v8.4s, v8.4s, v29.4s\n" - "fmin v7.4s, v7.4s, v29.4s\n" - "fmin v6.4s, v6.4s, v29.4s\n" - "str q8, [%[outptr0]]\n" - "fmla v3.4s, v24.4s, v9.4s\n" - "str q7, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v23.4s, v10.4s\n" - "str q6, [%[outptr0], x27]\n" - "fmax v5.4s, v5.4s, v30.4s\n" - "fmax v4.4s, v4.4s, v30.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v0.4s, v24.4s, v12.4s\n" - "fmin v5.4s, v5.4s, v29.4s\n" - "fmin v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v30.4s\n" - "str q5, [x25]\n" - "fmax v2.4s, v2.4s, v30.4s\n" - "str q4, [x25, %[output_col_stride1]]\n" - "fmla v0.4s, v21.4s, v9.4s\n" - "fmin v3.4s, v3.4s, v29.4s\n" - "fmin v2.4s, v2.4s, v29.4s\n" - "fmax v1.4s, v1.4s, v30.4s\n" - "str q3, [x25, x27]\n" - "str q2, [x13]\n" - "fmin v1.4s, v1.4s, v29.4s\n" - "fmax v0.4s, v0.4s, v30.4s\n" - "add x25, x25, #16\n" - "str q1, [x13, %[output_col_stride1]]\n" - "fmin v0.4s, v0.4s, v29.4s\n" - "str q0, [x13, x27]\n" - "add x13, x13, #16\n" - "4:\n" - "cbz x19, 7f\n" - "ldr s19, [%[wbptr]]\n" - "mov v8.16b, v19.16b\n" - "ldr s17, [%[wbptr], #4]\n" - "mov v5.16b, v19.16b\n" - "ldr s16, [%[wbptr], #8]\n" - "mov v7.16b, v19.16b\n" - "ldr s15, [%[wbptr], #12]\n" - "mov v2.16b, v19.16b\n" - "ldr s14, [%[wbptr], #16]\n" - "mov v4.16b, v19.16b\n" - "ldr s13, [%[wbptr], #20]\n" - "mov v6.16b, v19.16b\n" - "ldr s12, [%[wbptr], #24]\n" - "mov v1.16b, v19.16b\n" - "ldr s11, [%[wbptr], #28]\n" - "mov v3.16b, v19.16b\n" - "ldr s10, [%[wbptr], #32]\n" - "mov v0.16b, v19.16b\n" - "ldr s9, [%[wbptr], #36]\n" - "ldr s25, [%[inptr0]]\n" - "subs x19, x19, #1\n" - "fmla v8.4s, v25.4s, v17.4s\n" - "ldr s27, [x17]\n" - 
"ldr s26, [%[inptr0], %[input_col_stride1]]\n" - "ldr s20, [x14]\n" - "ldr s22, [x17, %[input_col_stride1]]\n" - "ldr s28, [%[inptr0], x9]\n" - "fmla v8.4s, v27.4s, v14.4s\n" - "ldr s23, [x21]\n" - "ldr s18, [x14, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x17, #64]\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "prfm pldl1keep, [x14, #64]\n" - "prfm pldl1keep, [x17, x28]\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "prfm pldl1keep, [x21, #64]\n" - "prfm pldl1keep, [x14, x28]\n" - "beq 6f\n" - "5:\n" - "fmla v5.4s, v27.4s, v17.4s\n" - "ldr s27, [x17, x9]\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "ldr s30, [%[inptr0], x15]\n" - "fmla v7.4s, v26.4s, v17.4s\n" - "ldr s31, [x26]\n" - "fmla v5.4s, v20.4s, v14.4s\n" - "ldr s24, [x21, %[input_col_stride1]]\n" - "fmla v8.4s, v20.4s, v11.4s\n" - "prfm pldl1keep, [x17, x22]\n" - "fmla v2.4s, v20.4s, v17.4s\n" - "ldr s29, [x14, x9]\n" - "fmla v5.4s, v22.4s, v16.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x26, #64]\n" - "fmla v7.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x21, x28]\n" - "fmla v4.4s, v22.4s, v17.4s\n" - "ldr s21, [x17, x15]\n" - "fmla v8.4s, v28.4s, v15.4s\n" - "prfm pldl1keep, [x14, x22]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "prfm pldl1keep, [x17, x16]\n" - "fmla v6.4s, v28.4s, v17.4s\n" - "ldr s19, [%[inptr0], x24]\n" - "fmla v5.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "ldr s28, [x26, %[input_col_stride1]]\n" - "fmla v8.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x26, x28]\n" - "fmla v5.4s, v18.4s, v13.4s\n" - "prfm pldl1keep, [x21, x22]\n" - "fmla v7.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x14, x16]\n" - "fmla v2.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x17, x23]\n" - "fmla v4.4s, v18.4s, v14.4s\n" - "prfm pldl1keep, [x26, x22]\n" - "fmla v1.4s, v18.4s, v17.4s\n" - "ldr s25, [x21, x9]\n" - "fmla v8.4s, v27.4s, v12.4s\n" - "prfm pldl1keep, [x21, x16]\n" - "fmla v5.4s, 
v27.4s, v15.4s\n" - "prfm pldl1keep, [x14, x23]\n" - "fmla v7.4s, v27.4s, v13.4s\n" - "prfm pldl1keep, [x26, x16]\n" - "fmla v4.4s, v27.4s, v16.4s\n" - "prfm pldl1keep, [x21, x23]\n" - "fmla v6.4s, v27.4s, v14.4s\n" - "prfm pldl1keep, [x26, x23]\n" - "fmla v3.4s, v27.4s, v17.4s\n" - "ldr s27, [x14, x15]\n" - "fmla v7.4s, v30.4s, v15.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v6.4s, v30.4s, v16.4s\n" - "ldr s26, [x17, x24]\n" - "fmla v2.4s, v31.4s, v11.4s\n" - "ldr s20, [x26, x9]\n" - "fmla v5.4s, v24.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v4.4s, v24.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v2.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v1.4s, v24.4s, v14.4s\n" - "ldr s18, [x21, x15]\n" - "fmla v8.4s, v29.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "fmla v5.4s, v29.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "fmla v7.4s, v29.4s, v10.4s\n" - "add x17, x17, #4\n" - "fmla v2.4s, v29.4s, v15.4s\n" - "prfm pldl1keep, [x17, #64]\n" - "fmla v4.4s, v29.4s, v13.4s\n" - "prfm pldl1keep, [x17, x28]\n" - "fmla v6.4s, v29.4s, v11.4s\n" - "subs x19, x19, #1\n" - "fmla v1.4s, v29.4s, v16.4s\n" - "fmla v3.4s, v29.4s, v14.4s\n" - "fmla v0.4s, v29.4s, v17.4s\n" - "ldr s22, [x14, x24]\n" - "fmla v7.4s, v21.4s, v12.4s\n" - "ldr s23, [x26, x15]\n" - "fmla v4.4s, v21.4s, v15.4s\n" - "add x14, x14, #4\n" - "fmla v6.4s, v21.4s, v13.4s\n" - "prfm pldl1keep, [x14, #64]\n" - "fmla v3.4s, v21.4s, v16.4s\n" - "ldr s24, [x21, x24]\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "prfm pldl1keep, [x14, x28]\n" - "fmla v6.4s, v19.4s, v15.4s\n" - "ldr s21, [x26, x24]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "ldr s19, [%[wbptr]]\n" - "fmla v5.4s, v25.4s, v9.4s\n" - "add x21, x21, #4\n" - "fmla v2.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x21, #64]\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "add x26, x26, #4\n" - "fmla v1.4s, v25.4s, v13.4s\n" - "fmla v3.4s, v25.4s, v11.4s\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "ldr s17, [%[wbptr], 
#4]\n" - "fmla v7.4s, v27.4s, v9.4s\n" - "ldr s25, [%[inptr0]]\n" - "fmla v4.4s, v27.4s, v12.4s\n" - "fmla v6.4s, v27.4s, v10.4s\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "fmla v3.4s, v27.4s, v13.4s\n" - "fmla v0.4s, v27.4s, v16.4s\n" - "ldr s14, [%[wbptr], #16]\n" - "fmla v6.4s, v26.4s, v12.4s\n" - "ldr s27, [x17]\n" - "fmla v3.4s, v26.4s, v15.4s\n" - "ldr s26, [%[inptr0], %[input_col_stride1]]\n" - "fmla v2.4s, v20.4s, v9.4s\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "fmla v0.4s, v20.4s, v11.4s\n" - "ldr s16, [%[wbptr], #8]\n" - "fmla v4.4s, v18.4s, v9.4s\n" - "ldr s20, [x14]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "fmla v0.4s, v18.4s, v13.4s\n" - "ldr s11, [%[wbptr], #28]\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "movi v30.16b, #0\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "fmla v0.4s, v22.4s, v15.4s\n" - "ldr s13, [%[wbptr], #20]\n" - "fmov v29.4s, #6.0\n" - "fmax v8.4s, v8.4s, v30.4s\n" - "fmla v3.4s, v24.4s, v9.4s\n" - "fmax v7.4s, v7.4s, v30.4s\n" - "fmla v0.4s, v23.4s, v10.4s\n" - "ldr s15, [%[wbptr], #12]\n" - "fmin v8.4s, v8.4s, v29.4s\n" - "ldr s22, [x17, %[input_col_stride1]]\n" - "fmin v7.4s, v7.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v30.4s\n" - "str s8, [%[outptr0]]\n" - "fmla v0.4s, v24.4s, v12.4s\n" - "str s7, [%[outptr0], %[output_col_stride1]]\n" - "fmin v6.4s, v6.4s, v29.4s\n" - "fmax v5.4s, v5.4s, v30.4s\n" - "ldr s10, [%[wbptr], #32]\n" - "str s6, [%[outptr0], x27]\n" - "fmla v0.4s, v21.4s, v9.4s\n" - "fmin v5.4s, v5.4s, v29.4s\n" - "ldr s12, [%[wbptr], #24]\n" - "fmax v4.4s, v4.4s, v30.4s\n" - "ldr s28, [%[inptr0], x9]\n" - "str s5, [x25]\n" - "fmax v3.4s, v3.4s, v30.4s\n" - "fmin v4.4s, v4.4s, v29.4s\n" - "ldr s9, [%[wbptr], #36]\n" - "fmin v3.4s, v3.4s, v29.4s\n" - "ldr s23, [x21]\n" - "str s4, [x25, %[output_col_stride1]]\n" - "fmax v2.4s, v2.4s, v30.4s\n" - "str s3, [x25, x27]\n" - "fmax v1.4s, v1.4s, v30.4s\n" - "fmin v2.4s, v2.4s, v29.4s\n" - "ldr s18, [x14, %[input_col_stride1]]\n" - "fmin v1.4s, 
v1.4s, v29.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "str s2, [x13]\n" - "fmax v0.4s, v0.4s, v30.4s\n" - "str s1, [x13, %[output_col_stride1]]\n" - "mov v8.16b, v19.16b\n" - "fmin v0.4s, v0.4s, v29.4s\n" - "add x25, x25, #4\n" - "mov v5.16b, v19.16b\n" - "mov v7.16b, v19.16b\n" - "str s0, [x13, x27]\n" - "mov v2.16b, v19.16b\n" - "mov v4.16b, v19.16b\n" - "add x13, x13, #4\n" - "mov v6.16b, v19.16b\n" - "mov v1.16b, v19.16b\n" - "mov v3.16b, v19.16b\n" - "mov v0.16b, v19.16b\n" - "fmla v8.4s, v25.4s, v17.4s\n" - "fmla v8.4s, v27.4s, v14.4s\n" - "bne 5b\n" - "6:\n" - "fmla v5.4s, v27.4s, v17.4s\n" - "ldr s27, [x17, x9]\n" - "fmla v8.4s, v26.4s, v16.4s\n" - "ldr s30, [%[inptr0], x15]\n" - "fmla v7.4s, v26.4s, v17.4s\n" - "ldr s31, [x26]\n" - "fmla v5.4s, v20.4s, v14.4s\n" - "ldr s24, [x21, %[input_col_stride1]]\n" - "fmla v8.4s, v20.4s, v11.4s\n" - "prfm pldl1keep, [x17, x22]\n" - "fmla v2.4s, v20.4s, v17.4s\n" - "ldr s29, [x14, x9]\n" - "fmla v5.4s, v22.4s, v16.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v8.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x26, #64]\n" - "fmla v7.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x21, x28]\n" - "fmla v4.4s, v22.4s, v17.4s\n" - "ldr s21, [x17, x15]\n" - "fmla v8.4s, v28.4s, v15.4s\n" - "prfm pldl1keep, [x14, x22]\n" - "fmla v7.4s, v28.4s, v16.4s\n" - "prfm pldl1keep, [x17, x16]\n" - "fmla v6.4s, v28.4s, v17.4s\n" - "ldr s19, [%[inptr0], x24]\n" - "fmla v5.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [%[inptr0], x23]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "ldr s28, [x26, %[input_col_stride1]]\n" - "fmla v8.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x26, x28]\n" - "fmla v5.4s, v18.4s, v13.4s\n" - "prfm pldl1keep, [x21, x22]\n" - "fmla v7.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x14, x16]\n" - "fmla v2.4s, v18.4s, v16.4s\n" - "prfm pldl1keep, [x17, x23]\n" - "fmla v4.4s, v18.4s, v14.4s\n" - "prfm pldl1keep, [x26, x22]\n" - "fmla v1.4s, v18.4s, v17.4s\n" - "ldr s25, [x21, x9]\n" - "fmla v8.4s, v27.4s, v12.4s\n" - "prfm pldl1keep, 
[x21, x16]\n" - "fmla v5.4s, v27.4s, v15.4s\n" - "prfm pldl1keep, [x14, x23]\n" - "fmla v7.4s, v27.4s, v13.4s\n" - "prfm pldl1keep, [x26, x16]\n" - "fmla v4.4s, v27.4s, v16.4s\n" - "prfm pldl1keep, [x21, x23]\n" - "fmla v6.4s, v27.4s, v14.4s\n" - "prfm pldl1keep, [x26, x23]\n" - "fmla v3.4s, v27.4s, v17.4s\n" - "ldr s27, [x14, x15]\n" - "fmla v7.4s, v30.4s, v15.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v6.4s, v30.4s, v16.4s\n" - "ldr s26, [x17, x24]\n" - "fmla v2.4s, v31.4s, v11.4s\n" - "ldr s20, [x26, x9]\n" - "fmla v5.4s, v24.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v4.4s, v24.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v2.4s, v24.4s, v13.4s\n" - "add x17, x17, #4\n" - "fmla v1.4s, v24.4s, v14.4s\n" - "ldr s18, [x21, x15]\n" - "fmla v8.4s, v29.4s, v9.4s\n" - "fmla v5.4s, v29.4s, v12.4s\n" - "fmla v7.4s, v29.4s, v10.4s\n" - "fmla v2.4s, v29.4s, v15.4s\n" - "fmla v4.4s, v29.4s, v13.4s\n" - "fmla v6.4s, v29.4s, v11.4s\n" - "fmla v1.4s, v29.4s, v16.4s\n" - "fmla v3.4s, v29.4s, v14.4s\n" - "fmla v0.4s, v29.4s, v17.4s\n" - "ldr s22, [x14, x24]\n" - "fmla v7.4s, v21.4s, v12.4s\n" - "ldr s23, [x26, x15]\n" - "fmla v4.4s, v21.4s, v15.4s\n" - "add x14, x14, #4\n" - "fmla v6.4s, v21.4s, v13.4s\n" - "fmla v3.4s, v21.4s, v16.4s\n" - "fmla v2.4s, v28.4s, v10.4s\n" - "ldr s24, [x21, x24]\n" - "fmla v1.4s, v28.4s, v11.4s\n" - "ldr s21, [x26, x24]\n" - "fmla v6.4s, v19.4s, v15.4s\n" - "add x21, x21, #4\n" - "fmla v5.4s, v25.4s, v9.4s\n" - "add x26, x26, #4\n" - "fmla v2.4s, v25.4s, v12.4s\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "fmla v1.4s, v25.4s, v13.4s\n" - "fmla v3.4s, v25.4s, v11.4s\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "fmla v7.4s, v27.4s, v9.4s\n" - "fmla v4.4s, v27.4s, v12.4s\n" - "fmla v6.4s, v27.4s, v10.4s\n" - "fmla v1.4s, v27.4s, v15.4s\n" - "fmla v3.4s, v27.4s, v13.4s\n" - "fmla v0.4s, v27.4s, v16.4s\n" - "fmla v2.4s, v20.4s, v9.4s\n" - "fmla v6.4s, v26.4s, v12.4s\n" - "fmla v4.4s, v18.4s, v9.4s\n" - "fmla v3.4s, v26.4s, v15.4s\n" 
- "fmla v1.4s, v20.4s, v10.4s\n" - "fmla v0.4s, v20.4s, v11.4s\n" - "movi v30.16b, #0\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "fmov v29.4s, #6.0\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "fmla v0.4s, v18.4s, v13.4s\n" - "fmax v8.4s, v8.4s, v30.4s\n" - "fmax v7.4s, v7.4s, v30.4s\n" - "fmax v6.4s, v6.4s, v30.4s\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "fmla v0.4s, v22.4s, v15.4s\n" - "fmin v8.4s, v8.4s, v29.4s\n" - "fmin v7.4s, v7.4s, v29.4s\n" - "fmin v6.4s, v6.4s, v29.4s\n" - "str s8, [%[outptr0]]\n" - "fmla v3.4s, v24.4s, v9.4s\n" - "str s7, [%[outptr0], %[output_col_stride1]]\n" - "fmla v0.4s, v23.4s, v10.4s\n" - "str s6, [%[outptr0], x27]\n" - "fmax v5.4s, v5.4s, v30.4s\n" - "fmax v4.4s, v4.4s, v30.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v0.4s, v24.4s, v12.4s\n" - "fmin v5.4s, v5.4s, v29.4s\n" - "fmin v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v30.4s\n" - "str s5, [x25]\n" - "fmax v2.4s, v2.4s, v30.4s\n" - "str s4, [x25, %[output_col_stride1]]\n" - "fmla v0.4s, v21.4s, v9.4s\n" - "fmin v3.4s, v3.4s, v29.4s\n" - "fmin v2.4s, v2.4s, v29.4s\n" - "fmax v1.4s, v1.4s, v30.4s\n" - "str s3, [x25, x27]\n" - "str s2, [x13]\n" - "fmin v1.4s, v1.4s, v29.4s\n" - "fmax v0.4s, v0.4s, v30.4s\n" - "add x25, x25, #4\n" - "str s1, [x13, %[output_col_stride1]]\n" - "fmin v0.4s, v0.4s, v29.4s\n" - "str s0, [x13, x27]\n" - "add x13, x13, #4\n" - "7:\n" - : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr) - : [input_col_stride1] "r" (input_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", 
"v7", "v8", "v9", "x13", "x14", "x15", "x16", "x17", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -#endif // __aarch64__ - -template class DepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float, float>; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp deleted file mode 100644 index b798b8cdbe..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_3x3_3x3_2x2_fp32_fp32.cpp +++ /dev/null @@ -1,769 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "impl_fp32_fp32.hpp" - -namespace depthwise -{ - -using namespace neon_convolution_kernels; -using Conv = DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>; - -#ifdef __aarch64__ -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void* weight_bias_ptr, - const float* input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float* output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x15, %[inptr0], %[input_row_stride]\n" - "add x26, %[input_col_stride1], %[input_col_stride1]\n" - "add x21, %[outptr0], %[output_row_stride]\n" - "add x16, x15, %[input_row_stride]\n" - "add x27, x26, %[input_col_stride1]\n" - "add x22, x21, %[output_row_stride]\n" - "add x17, x16, %[input_row_stride]\n" - "add x28, x27, %[input_col_stride1]\n" - "add x23, %[output_col_stride1], %[output_col_stride1]\n" - "add x9, x17, %[input_row_stride]\n" - "add x13, x28, %[input_col_stride1]\n" - "and x24, %[n_channels], #3\n" - "add x19, x9, %[input_row_stride]\n" - "add x14, x13, %[input_col_stride1]\n" - "lsr x25, %[n_channels], #2\n" - "add x20, x19, %[input_row_stride]\n" - "cbz x25, 4f\n" - "1:\n" - "ldr q27, [%[wbptr]]\n" - "subs x25, x25, #1\n" - "mov v17.16b, v27.16b\n" - "ldr q6, [%[wbptr], #16]\n" - "mov v16.16b, v27.16b\n" - "ldr q14, [%[wbptr], #32]\n" - "mov v15.16b, v27.16b\n" - "ldr q13, [%[wbptr], #48]\n" - "mov v2.16b, v27.16b\n" - "ldr q12, [%[wbptr], #64]\n" - "mov v4.16b, v27.16b\n" - "ldr q11, [%[wbptr], #80]\n" - "mov v5.16b, v27.16b\n" - "ldr q10, [%[wbptr], #96]\n" - "mov v1.16b, v27.16b\n" - "ldr q9, [%[wbptr], #112]\n" - "mov v3.16b, v27.16b\n" - "ldr q8, [%[wbptr], #128]\n" - "mov v0.16b, v27.16b\n" - "ldr q7, [%[wbptr], #144]\n" - "ldr q29, [%[inptr0]]\n" - "ldr q28, [x15]\n" - "ldr q26, [%[inptr0], %[input_col_stride1]]\n" - "ldr q22, [x16]\n" - "ldr q20, [x15, %[input_col_stride1]]\n" - "ldr q19, 
[%[inptr0], x26]\n" - "ldr q30, [x17]\n" - "ldr q18, [x16, %[input_col_stride1]]\n" - "beq 3f\n" - "2:\n" - "fmla v17.4s, v29.4s, v6.4s\n" - "ldr q21, [x15, x26]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "ldr q27, [%[inptr0], x27]\n" - "fmla v15.4s, v19.4s, v6.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v17.4s, v28.4s, v12.4s\n" - "ldr q25, [x9]\n" - "fmla v16.4s, v30.4s, v12.4s\n" - "ldr q24, [x17, %[input_col_stride1]]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v17.4s, v26.4s, v14.4s\n" - "ldr q23, [x16, x26]\n" - "fmla v16.4s, v18.4s, v14.4s\n" - "subs x25, x25, #1\n" - "fmla v15.4s, v27.4s, v14.4s\n" - "ldr q26, [x15, x27]\n" - "fmla v17.4s, v22.4s, v9.4s\n" - "ldr q22, [%[inptr0], x28]\n" - "fmla v16.4s, v25.4s, v9.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v23.4s, v9.4s\n" - "ldr q30, [x19]\n" - "fmla v17.4s, v20.4s, v11.4s\n" - "ldr q29, [x9, %[input_col_stride1]]\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "ldr q28, [x17, x26]\n" - "fmla v4.4s, v23.4s, v6.4s\n" - "fmla v15.4s, v26.4s, v11.4s\n" - "fmla v17.4s, v19.4s, v13.4s\n" - "ldr q24, [x16, x27]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "ldr q25, [x15, x28]\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "fmla v5.4s, v22.4s, v6.4s\n" - "fmla v17.4s, v18.4s, v8.4s\n" - "ldr q19, [%[inptr0], x13]\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "ldr q18, [x20]\n" - "fmla v16.4s, v29.4s, v8.4s\n" - "ldr q22, [x19, %[input_col_stride1]]\n" - "fmla v17.4s, v21.4s, v10.4s\n" - "ldr q26, [x9, x26]\n" - "fmla v2.4s, v29.4s, v14.4s\n" - "ldr q20, [x17, x27]\n" - "fmla v16.4s, v28.4s, v10.4s\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "fmla v17.4s, v23.4s, v7.4s\n" - "ldr q27, [x16, x28]\n" - "fmla v15.4s, v24.4s, v8.4s\n" - "ldr q30, [x15, x13]\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "ldr q24, [%[inptr0], x14]\n" - "str q17, [%[outptr0]]\n" - "fmla v5.4s, v25.4s, v12.4s\n" - "fmla v15.4s, v25.4s, v10.4s\n" - "ldr q28, [x20, %[input_col_stride1]]\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "ldr q17, 
[x19, x26]\n" - "fmla v5.4s, v19.4s, v14.4s\n" - "ldr q18, [x9, x27]\n" - "fmla v16.4s, v26.4s, v7.4s\n" - "ldr q25, [x17, x28]\n" - "fmla v2.4s, v22.4s, v11.4s\n" - "ldr q22, [x16, x13]\n" - "fmla v4.4s, v26.4s, v9.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "str q16, [x21]\n" - "fmla v1.4s, v26.4s, v6.4s\n" - "fmla v2.4s, v26.4s, v13.4s\n" - "ldr q21, [x15, x14]\n" - "fmla v4.4s, v20.4s, v11.4s\n" - "ldr q23, [x20, x26]\n" - "fmla v15.4s, v27.4s, v7.4s\n" - "ldr q19, [x19, x27]\n" - "fmla v5.4s, v27.4s, v9.4s\n" - "add x15, x15, #16\n" - "fmla v4.4s, v27.4s, v13.4s\n" - "fmla v3.4s, v27.4s, v6.4s\n" - "str q15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v2.4s, v28.4s, v8.4s\n" - "fmla v5.4s, v30.4s, v11.4s\n" - "ldr q29, [x9, x28]\n" - "fmla v1.4s, v17.4s, v12.4s\n" - "ldr q27, [x17, x13]\n" - "fmla v2.4s, v17.4s, v10.4s\n" - "ldr q28, [x16, x14]\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "ldr q26, [x20, x27]\n" - "fmla v4.4s, v18.4s, v8.4s\n" - "ldr q20, [x19, x28]\n" - "fmla v1.4s, v18.4s, v14.4s\n" - "ldr q17, [x9, x13]\n" - "fmla v3.4s, v25.4s, v12.4s\n" - "ldr q18, [x17, x14]\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "ldr q16, [x20, x28]\n" - "fmla v5.4s, v22.4s, v8.4s\n" - "add x16, x16, #16\n" - "fmla v3.4s, v22.4s, v14.4s\n" - "ldr q15, [x19, x13]\n" - "fmla v2.4s, v23.4s, v7.4s\n" - "add x17, x17, #16\n" - "fmla v5.4s, v21.4s, v10.4s\n" - "ldr q21, [x9, x14]\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "ldr q23, [x20, x13]\n" - "str q2, [x22]\n" - "fmla v4.4s, v29.4s, v7.4s\n" - "fmla v3.4s, v29.4s, v9.4s\n" - "ldr q24, [x19, x14]\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "ldr q25, [x20, x14]\n" - "str q4, [x21, %[output_col_stride1]]\n" - "fmla v0.4s, v29.4s, v6.4s\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "ldr q27, [%[wbptr]]\n" - "fmla v1.4s, v29.4s, v13.4s\n" - "ldr q29, [%[inptr0]]\n" - "fmla v5.4s, v28.4s, v7.4s\n" - "ldr q6, [%[wbptr], #16]\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "ldr q28, [x15]\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "ldr q26, [%[inptr0], 
%[input_col_stride1]]\n" - "str q5, [%[outptr0], x23]\n" - "fmla v0.4s, v20.4s, v12.4s\n" - "fmla v3.4s, v17.4s, v8.4s\n" - "ldr q22, [x16]\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "ldr q20, [x15, %[input_col_stride1]]\n" - "fmla v0.4s, v17.4s, v14.4s\n" - "ldr q12, [%[wbptr], #64]\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "ldr q19, [%[inptr0], x26]\n" - "fmla v1.4s, v16.4s, v7.4s\n" - "ldr q30, [x17]\n" - "fmla v0.4s, v16.4s, v9.4s\n" - "ldr q14, [%[wbptr], #32]\n" - "fmla v3.4s, v21.4s, v7.4s\n" - "ldr q18, [x16, %[input_col_stride1]]\n" - "str q1, [x22, %[output_col_stride1]]\n" - "mov v17.16b, v27.16b\n" - "fmla v0.4s, v15.4s, v11.4s\n" - "ldr q9, [%[wbptr], #112]\n" - "str q3, [x21, x23]\n" - "mov v16.16b, v27.16b\n" - "mov v15.16b, v27.16b\n" - "add x9, x9, #16\n" - "fmla v0.4s, v21.4s, v13.4s\n" - "ldr q11, [%[wbptr], #80]\n" - "mov v2.16b, v27.16b\n" - "add x19, x19, #16\n" - "mov v4.16b, v27.16b\n" - "add x20, x20, #16\n" - "fmla v0.4s, v23.4s, v8.4s\n" - "ldr q13, [%[wbptr], #48]\n" - "mov v5.16b, v27.16b\n" - "add %[outptr0], %[outptr0], #16\n" - "mov v1.16b, v27.16b\n" - "add x21, x21, #16\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "ldr q8, [%[wbptr], #128]\n" - "mov v3.16b, v27.16b\n" - "fmla v0.4s, v25.4s, v7.4s\n" - "ldr q10, [%[wbptr], #96]\n" - "str q0, [x22, x23]\n" - "mov v0.16b, v27.16b\n" - "ldr q7, [%[wbptr], #144]\n" - "add x22, x22, #16\n" - "bne 2b\n" - "3:\n" - "fmla v17.4s, v29.4s, v6.4s\n" - "ldr q21, [x15, x26]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "ldr q27, [%[inptr0], x27]\n" - "fmla v15.4s, v19.4s, v6.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v17.4s, v28.4s, v12.4s\n" - "ldr q25, [x9]\n" - "fmla v16.4s, v30.4s, v12.4s\n" - "ldr q24, [x17, %[input_col_stride1]]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v17.4s, v26.4s, v14.4s\n" - "ldr q23, [x16, x26]\n" - "fmla v16.4s, v18.4s, v14.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v27.4s, v14.4s\n" - "ldr q26, [x15, x27]\n" - "fmla v17.4s, 
v22.4s, v9.4s\n" - "ldr q22, [%[inptr0], x28]\n" - "fmla v16.4s, v25.4s, v9.4s\n" - "ldr q30, [x19]\n" - "fmla v15.4s, v23.4s, v9.4s\n" - "fmla v4.4s, v23.4s, v6.4s\n" - "fmla v17.4s, v20.4s, v11.4s\n" - "ldr q29, [x9, %[input_col_stride1]]\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "ldr q28, [x17, x26]\n" - "fmla v15.4s, v26.4s, v11.4s\n" - "ldr q24, [x16, x27]\n" - "fmla v17.4s, v19.4s, v13.4s\n" - "ldr q25, [x15, x28]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "fmla v5.4s, v22.4s, v6.4s\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "ldr q19, [%[inptr0], x13]\n" - "fmla v17.4s, v18.4s, v8.4s\n" - "ldr q18, [x20]\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "ldr q22, [x19, %[input_col_stride1]]\n" - "fmla v16.4s, v29.4s, v8.4s\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "fmla v17.4s, v21.4s, v10.4s\n" - "ldr q26, [x9, x26]\n" - "fmla v2.4s, v29.4s, v14.4s\n" - "ldr q20, [x17, x27]\n" - "fmla v16.4s, v28.4s, v10.4s\n" - "ldr q27, [x16, x28]\n" - "fmla v17.4s, v23.4s, v7.4s\n" - "ldr q30, [x15, x13]\n" - "fmla v15.4s, v24.4s, v8.4s\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "fmla v5.4s, v25.4s, v12.4s\n" - "ldr q24, [%[inptr0], x14]\n" - "str q17, [%[outptr0]]\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "fmla v15.4s, v25.4s, v10.4s\n" - "ldr q28, [x20, %[input_col_stride1]]\n" - "fmla v5.4s, v19.4s, v14.4s\n" - "ldr q17, [x19, x26]\n" - "fmla v2.4s, v22.4s, v11.4s\n" - "ldr q18, [x9, x27]\n" - "fmla v16.4s, v26.4s, v7.4s\n" - "ldr q25, [x17, x28]\n" - "fmla v4.4s, v26.4s, v9.4s\n" - "ldr q22, [x16, x13]\n" - "fmla v2.4s, v26.4s, v13.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "str q16, [x21]\n" - "fmla v1.4s, v26.4s, v6.4s\n" - "fmla v4.4s, v20.4s, v11.4s\n" - "ldr q21, [x15, x14]\n" - "fmla v15.4s, v27.4s, v7.4s\n" - "ldr q23, [x20, x26]\n" - "fmla v5.4s, v27.4s, v9.4s\n" - "ldr q19, [x19, x27]\n" - "fmla v4.4s, v27.4s, v13.4s\n" - "add x15, x15, #16\n" - "str q15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v3.4s, v27.4s, v6.4s\n" - "fmla v5.4s, v30.4s, v11.4s\n" - "ldr q29, [x9, x28]\n" - "fmla 
v2.4s, v28.4s, v8.4s\n" - "ldr q27, [x17, x13]\n" - "fmla v1.4s, v17.4s, v12.4s\n" - "ldr q28, [x16, x14]\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "ldr q26, [x20, x27]\n" - "fmla v2.4s, v17.4s, v10.4s\n" - "ldr q20, [x19, x28]\n" - "fmla v4.4s, v18.4s, v8.4s\n" - "ldr q17, [x9, x13]\n" - "fmla v1.4s, v18.4s, v14.4s\n" - "ldr q18, [x17, x14]\n" - "fmla v3.4s, v25.4s, v12.4s\n" - "add x16, x16, #16\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "ldr q16, [x20, x28]\n" - "fmla v5.4s, v22.4s, v8.4s\n" - "add x17, x17, #16\n" - "fmla v3.4s, v22.4s, v14.4s\n" - "ldr q15, [x19, x13]\n" - "fmla v2.4s, v23.4s, v7.4s\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "fmla v5.4s, v21.4s, v10.4s\n" - "ldr q21, [x9, x14]\n" - "fmla v4.4s, v29.4s, v7.4s\n" - "ldr q23, [x20, x13]\n" - "str q2, [x22]\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "fmla v3.4s, v29.4s, v9.4s\n" - "ldr q24, [x19, x14]\n" - "str q4, [x21, %[output_col_stride1]]\n" - "fmla v0.4s, v29.4s, v6.4s\n" - "fmla v1.4s, v29.4s, v13.4s\n" - "ldr q25, [x20, x14]\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "add x9, x9, #16\n" - "fmla v5.4s, v28.4s, v7.4s\n" - "add x19, x19, #16\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "add x20, x20, #16\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "fmla v0.4s, v20.4s, v12.4s\n" - "str q5, [%[outptr0], x23]\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "fmla v3.4s, v17.4s, v8.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v0.4s, v17.4s, v14.4s\n" - "fmla v1.4s, v16.4s, v7.4s\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "fmla v0.4s, v16.4s, v9.4s\n" - "str q1, [x22, %[output_col_stride1]]\n" - "fmla v3.4s, v21.4s, v7.4s\n" - "fmla v0.4s, v15.4s, v11.4s\n" - "str q3, [x21, x23]\n" - "fmla v0.4s, v21.4s, v13.4s\n" - "add x21, x21, #16\n" - "fmla v0.4s, v23.4s, v8.4s\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "fmla v0.4s, v25.4s, v7.4s\n" - "str q0, [x22, x23]\n" - "add x22, x22, #16\n" - "4:\n" - "cbz x24, 7f\n" - "ldr s27, [%[wbptr]]\n" - "mov v17.16b, v27.16b\n" - "ldr s6, [%[wbptr], #4]\n" - "mov v16.16b, v27.16b\n" - "ldr s14, [%[wbptr], #8]\n" 
- "mov v15.16b, v27.16b\n" - "ldr s13, [%[wbptr], #12]\n" - "mov v2.16b, v27.16b\n" - "ldr s12, [%[wbptr], #16]\n" - "mov v4.16b, v27.16b\n" - "ldr s11, [%[wbptr], #20]\n" - "mov v5.16b, v27.16b\n" - "ldr s10, [%[wbptr], #24]\n" - "mov v1.16b, v27.16b\n" - "ldr s9, [%[wbptr], #28]\n" - "mov v3.16b, v27.16b\n" - "ldr s8, [%[wbptr], #32]\n" - "mov v0.16b, v27.16b\n" - "ldr s7, [%[wbptr], #36]\n" - "ldr s29, [%[inptr0]]\n" - "subs x24, x24, #1\n" - "ldr s28, [x15]\n" - "ldr s26, [%[inptr0], %[input_col_stride1]]\n" - "ldr s22, [x16]\n" - "ldr s20, [x15, %[input_col_stride1]]\n" - "ldr s19, [%[inptr0], x26]\n" - "ldr s30, [x17]\n" - "ldr s18, [x16, %[input_col_stride1]]\n" - "beq 6f\n" - "5:\n" - "fmla v17.4s, v29.4s, v6.4s\n" - "ldr s21, [x15, x26]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "ldr s27, [%[inptr0], x27]\n" - "fmla v15.4s, v19.4s, v6.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v17.4s, v28.4s, v12.4s\n" - "ldr s25, [x9]\n" - "fmla v16.4s, v30.4s, v12.4s\n" - "ldr s24, [x17, %[input_col_stride1]]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v17.4s, v26.4s, v14.4s\n" - "ldr s23, [x16, x26]\n" - "fmla v16.4s, v18.4s, v14.4s\n" - "subs x24, x24, #1\n" - "fmla v15.4s, v27.4s, v14.4s\n" - "ldr s26, [x15, x27]\n" - "fmla v17.4s, v22.4s, v9.4s\n" - "ldr s22, [%[inptr0], x28]\n" - "fmla v16.4s, v25.4s, v9.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v23.4s, v9.4s\n" - "ldr s30, [x19]\n" - "fmla v17.4s, v20.4s, v11.4s\n" - "ldr s29, [x9, %[input_col_stride1]]\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "ldr s28, [x17, x26]\n" - "fmla v4.4s, v23.4s, v6.4s\n" - "fmla v15.4s, v26.4s, v11.4s\n" - "fmla v17.4s, v19.4s, v13.4s\n" - "ldr s24, [x16, x27]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "ldr s25, [x15, x28]\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "fmla v5.4s, v22.4s, v6.4s\n" - "fmla v17.4s, v18.4s, v8.4s\n" - "ldr s19, [%[inptr0], x13]\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "ldr s18, [x20]\n" - "fmla v16.4s, v29.4s, v8.4s\n" - 
"ldr s22, [x19, %[input_col_stride1]]\n" - "fmla v17.4s, v21.4s, v10.4s\n" - "ldr s26, [x9, x26]\n" - "fmla v2.4s, v29.4s, v14.4s\n" - "ldr s20, [x17, x27]\n" - "fmla v16.4s, v28.4s, v10.4s\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "fmla v17.4s, v23.4s, v7.4s\n" - "ldr s27, [x16, x28]\n" - "fmla v15.4s, v24.4s, v8.4s\n" - "ldr s30, [x15, x13]\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "ldr s24, [%[inptr0], x14]\n" - "str s17, [%[outptr0]]\n" - "fmla v5.4s, v25.4s, v12.4s\n" - "fmla v15.4s, v25.4s, v10.4s\n" - "ldr s28, [x20, %[input_col_stride1]]\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "ldr s17, [x19, x26]\n" - "fmla v5.4s, v19.4s, v14.4s\n" - "ldr s18, [x9, x27]\n" - "fmla v16.4s, v26.4s, v7.4s\n" - "ldr s25, [x17, x28]\n" - "fmla v2.4s, v22.4s, v11.4s\n" - "ldr s22, [x16, x13]\n" - "fmla v4.4s, v26.4s, v9.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "str s16, [x21]\n" - "fmla v1.4s, v26.4s, v6.4s\n" - "fmla v2.4s, v26.4s, v13.4s\n" - "ldr s21, [x15, x14]\n" - "fmla v4.4s, v20.4s, v11.4s\n" - "ldr s23, [x20, x26]\n" - "fmla v15.4s, v27.4s, v7.4s\n" - "ldr s19, [x19, x27]\n" - "fmla v5.4s, v27.4s, v9.4s\n" - "add x15, x15, #4\n" - "fmla v4.4s, v27.4s, v13.4s\n" - "fmla v3.4s, v27.4s, v6.4s\n" - "str s15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v2.4s, v28.4s, v8.4s\n" - "fmla v5.4s, v30.4s, v11.4s\n" - "ldr s29, [x9, x28]\n" - "fmla v1.4s, v17.4s, v12.4s\n" - "ldr s27, [x17, x13]\n" - "fmla v2.4s, v17.4s, v10.4s\n" - "ldr s28, [x16, x14]\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "ldr s26, [x20, x27]\n" - "fmla v4.4s, v18.4s, v8.4s\n" - "ldr s20, [x19, x28]\n" - "fmla v1.4s, v18.4s, v14.4s\n" - "ldr s17, [x9, x13]\n" - "fmla v3.4s, v25.4s, v12.4s\n" - "ldr s18, [x17, x14]\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "ldr s16, [x20, x28]\n" - "fmla v5.4s, v22.4s, v8.4s\n" - "add x16, x16, #4\n" - "fmla v3.4s, v22.4s, v14.4s\n" - "ldr s15, [x19, x13]\n" - "fmla v2.4s, v23.4s, v7.4s\n" - "add x17, x17, #4\n" - "fmla v5.4s, v21.4s, v10.4s\n" - "ldr s21, [x9, x14]\n" - "fmla v1.4s, v23.4s, 
v9.4s\n" - "ldr s23, [x20, x13]\n" - "str s2, [x22]\n" - "fmla v4.4s, v29.4s, v7.4s\n" - "fmla v3.4s, v29.4s, v9.4s\n" - "ldr s24, [x19, x14]\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "ldr s25, [x20, x14]\n" - "str s4, [x21, %[output_col_stride1]]\n" - "fmla v0.4s, v29.4s, v6.4s\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "ldr s27, [%[wbptr]]\n" - "fmla v1.4s, v29.4s, v13.4s\n" - "ldr s29, [%[inptr0]]\n" - "fmla v5.4s, v28.4s, v7.4s\n" - "ldr s6, [%[wbptr], #4]\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "ldr s28, [x15]\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "ldr s26, [%[inptr0], %[input_col_stride1]]\n" - "str s5, [%[outptr0], x23]\n" - "fmla v0.4s, v20.4s, v12.4s\n" - "fmla v3.4s, v17.4s, v8.4s\n" - "ldr s22, [x16]\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "ldr s20, [x15, %[input_col_stride1]]\n" - "fmla v0.4s, v17.4s, v14.4s\n" - "ldr s12, [%[wbptr], #16]\n" - "fmla v3.4s, v18.4s, v10.4s\n" - "ldr s19, [%[inptr0], x26]\n" - "fmla v1.4s, v16.4s, v7.4s\n" - "ldr s30, [x17]\n" - "fmla v0.4s, v16.4s, v9.4s\n" - "ldr s14, [%[wbptr], #8]\n" - "fmla v3.4s, v21.4s, v7.4s\n" - "ldr s18, [x16, %[input_col_stride1]]\n" - "str s1, [x22, %[output_col_stride1]]\n" - "mov v17.16b, v27.16b\n" - "fmla v0.4s, v15.4s, v11.4s\n" - "ldr s9, [%[wbptr], #28]\n" - "str s3, [x21, x23]\n" - "mov v16.16b, v27.16b\n" - "mov v15.16b, v27.16b\n" - "add x9, x9, #4\n" - "fmla v0.4s, v21.4s, v13.4s\n" - "ldr s11, [%[wbptr], #20]\n" - "mov v2.16b, v27.16b\n" - "add x19, x19, #4\n" - "mov v4.16b, v27.16b\n" - "add x20, x20, #4\n" - "fmla v0.4s, v23.4s, v8.4s\n" - "ldr s13, [%[wbptr], #12]\n" - "mov v5.16b, v27.16b\n" - "add %[outptr0], %[outptr0], #4\n" - "mov v1.16b, v27.16b\n" - "add x21, x21, #4\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "ldr s8, [%[wbptr], #32]\n" - "mov v3.16b, v27.16b\n" - "fmla v0.4s, v25.4s, v7.4s\n" - "ldr s10, [%[wbptr], #24]\n" - "str s0, [x22, x23]\n" - "mov v0.16b, v27.16b\n" - "ldr s7, [%[wbptr], #36]\n" - "add x22, x22, #4\n" - "bne 5b\n" - "6:\n" - "fmla v17.4s, v29.4s, v6.4s\n" - "ldr s21, 
[x15, x26]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "ldr s27, [%[inptr0], x27]\n" - "fmla v15.4s, v19.4s, v6.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v17.4s, v28.4s, v12.4s\n" - "ldr s25, [x9]\n" - "fmla v16.4s, v30.4s, v12.4s\n" - "ldr s24, [x17, %[input_col_stride1]]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v17.4s, v26.4s, v14.4s\n" - "ldr s23, [x16, x26]\n" - "fmla v16.4s, v18.4s, v14.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v27.4s, v14.4s\n" - "ldr s26, [x15, x27]\n" - "fmla v17.4s, v22.4s, v9.4s\n" - "ldr s22, [%[inptr0], x28]\n" - "fmla v16.4s, v25.4s, v9.4s\n" - "ldr s30, [x19]\n" - "fmla v15.4s, v23.4s, v9.4s\n" - "fmla v4.4s, v23.4s, v6.4s\n" - "fmla v17.4s, v20.4s, v11.4s\n" - "ldr s29, [x9, %[input_col_stride1]]\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "ldr s28, [x17, x26]\n" - "fmla v15.4s, v26.4s, v11.4s\n" - "ldr s24, [x16, x27]\n" - "fmla v17.4s, v19.4s, v13.4s\n" - "ldr s25, [x15, x28]\n" - "fmla v16.4s, v23.4s, v13.4s\n" - "fmla v5.4s, v22.4s, v6.4s\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "ldr s19, [%[inptr0], x13]\n" - "fmla v17.4s, v18.4s, v8.4s\n" - "ldr s18, [x20]\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "ldr s22, [x19, %[input_col_stride1]]\n" - "fmla v16.4s, v29.4s, v8.4s\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "fmla v17.4s, v21.4s, v10.4s\n" - "ldr s26, [x9, x26]\n" - "fmla v2.4s, v29.4s, v14.4s\n" - "ldr s20, [x17, x27]\n" - "fmla v16.4s, v28.4s, v10.4s\n" - "ldr s27, [x16, x28]\n" - "fmla v17.4s, v23.4s, v7.4s\n" - "ldr s30, [x15, x13]\n" - "fmla v15.4s, v24.4s, v8.4s\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "fmla v5.4s, v25.4s, v12.4s\n" - "ldr s24, [%[inptr0], x14]\n" - "str s17, [%[outptr0]]\n" - "fmla v2.4s, v18.4s, v9.4s\n" - "fmla v15.4s, v25.4s, v10.4s\n" - "ldr s28, [x20, %[input_col_stride1]]\n" - "fmla v5.4s, v19.4s, v14.4s\n" - "ldr s17, [x19, x26]\n" - "fmla v2.4s, v22.4s, v11.4s\n" - "ldr s18, [x9, x27]\n" - "fmla v16.4s, v26.4s, v7.4s\n" - "ldr s25, [x17, x28]\n" - "fmla v4.4s, 
v26.4s, v9.4s\n" - "ldr s22, [x16, x13]\n" - "fmla v2.4s, v26.4s, v13.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "str s16, [x21]\n" - "fmla v1.4s, v26.4s, v6.4s\n" - "fmla v4.4s, v20.4s, v11.4s\n" - "ldr s21, [x15, x14]\n" - "fmla v15.4s, v27.4s, v7.4s\n" - "ldr s23, [x20, x26]\n" - "fmla v5.4s, v27.4s, v9.4s\n" - "ldr s19, [x19, x27]\n" - "fmla v4.4s, v27.4s, v13.4s\n" - "add x15, x15, #4\n" - "str s15, [%[outptr0], %[output_col_stride1]]\n" - "fmla v3.4s, v27.4s, v6.4s\n" - "fmla v5.4s, v30.4s, v11.4s\n" - "ldr s29, [x9, x28]\n" - "fmla v2.4s, v28.4s, v8.4s\n" - "ldr s27, [x17, x13]\n" - "fmla v1.4s, v17.4s, v12.4s\n" - "ldr s28, [x16, x14]\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "ldr s26, [x20, x27]\n" - "fmla v2.4s, v17.4s, v10.4s\n" - "ldr s20, [x19, x28]\n" - "fmla v4.4s, v18.4s, v8.4s\n" - "ldr s17, [x9, x13]\n" - "fmla v1.4s, v18.4s, v14.4s\n" - "ldr s18, [x17, x14]\n" - "fmla v3.4s, v25.4s, v12.4s\n" - "add x16, x16, #4\n" - "fmla v4.4s, v25.4s, v10.4s\n" - "ldr s16, [x20, x28]\n" - "fmla v5.4s, v22.4s, v8.4s\n" - "add x17, x17, #4\n" - "fmla v3.4s, v22.4s, v14.4s\n" - "ldr s15, [x19, x13]\n" - "fmla v2.4s, v23.4s, v7.4s\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "fmla v5.4s, v21.4s, v10.4s\n" - "ldr s21, [x9, x14]\n" - "fmla v4.4s, v29.4s, v7.4s\n" - "ldr s23, [x20, x13]\n" - "str s2, [x22]\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "fmla v3.4s, v29.4s, v9.4s\n" - "ldr s24, [x19, x14]\n" - "str s4, [x21, %[output_col_stride1]]\n" - "fmla v0.4s, v29.4s, v6.4s\n" - "fmla v1.4s, v29.4s, v13.4s\n" - "ldr s25, [x20, x14]\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "add x9, x9, #4\n" - "fmla v5.4s, v28.4s, v7.4s\n" - "add x19, x19, #4\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "add x20, x20, #4\n" - "fmla v3.4s, v28.4s, v13.4s\n" - "fmla v0.4s, v20.4s, v12.4s\n" - "str s5, [%[outptr0], x23]\n" - "fmla v1.4s, v20.4s, v10.4s\n" - "fmla v3.4s, v17.4s, v8.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v0.4s, v17.4s, v14.4s\n" - "fmla v1.4s, v16.4s, v7.4s\n" - "fmla v3.4s, v18.4s, 
v10.4s\n" - "fmla v0.4s, v16.4s, v9.4s\n" - "str s1, [x22, %[output_col_stride1]]\n" - "fmla v3.4s, v21.4s, v7.4s\n" - "fmla v0.4s, v15.4s, v11.4s\n" - "str s3, [x21, x23]\n" - "fmla v0.4s, v21.4s, v13.4s\n" - "add x21, x21, #4\n" - "fmla v0.4s, v23.4s, v8.4s\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "fmla v0.4s, v25.4s, v7.4s\n" - "str s0, [x22, x23]\n" - "add x22, x22, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr), [inptr0] "+r" (input), [outptr0] "+r" (output) - : [n_channels] "r" ((long long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_row_stride] "r" (output_row_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x15", "x16", "x17", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x13", "x14", "memory" - ); -} -#endif // __aarch64__ - -template class DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp deleted file mode 100644 index 89d1f2238b..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_4x4_3x3_1x1_fp32_fp32.cpp +++ /dev/null @@ -1,6018 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "impl_fp32_fp32.hpp" - -namespace depthwise -{ - -using namespace neon_convolution_kernels; -using Conv = DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>; - -#ifdef __aarch64__ -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x8, %[inptr0], %[input_row_stride]\n" - "add x15, %[input_col_stride1], %[input_col_stride1]\n" - "add x23, %[outptr0], %[output_row_stride]\n" - "add x9, x8, %[input_row_stride]\n" - "add x16, x15, #64\n" - "add x17, x15, %[input_col_stride1]\n" - "add x10, x9, %[input_row_stride]\n" - "add x7, x17, #64\n" - "add x19, x17, %[input_col_stride1]\n" - "add x11, x10, %[input_row_stride]\n" - "add x20, x19, #64\n" - "add x21, x19, %[input_col_stride1]\n" - "add x12, x11, %[input_row_stride]\n" - "add x22, x21, #64\n" - "add x24, x23, %[output_row_stride]\n" - "add x25, x24, %[output_row_stride]\n" - "add x26, %[output_col_stride1], %[output_col_stride1]\n" - "and x13, %[n_channels], #3\n" - "add x27, x26, %[output_col_stride1]\n" - "lsr x14, %[n_channels], #2\n" - "cbz x14, 4f\n" - "1:\n" - "ldr q14, [%[wbptr]]\n" - "subs x14, x14, #1\n" - "mov v17.16b, v14.16b\n" - "ldr q12, [%[wbptr], #16]\n" - "mov v23.16b, v14.16b\n" - "ldr q11, [%[wbptr], #32]\n" - "mov v24.16b, v14.16b\n" - "ldr q10, [%[wbptr], #48]\n" - "mov v20.16b, v14.16b\n" - "ldr q9, [%[wbptr], #64]\n" - "mov v16.16b, v14.16b\n" - "ldr q8, [%[wbptr], #80]\n" - "mov v13.16b, v14.16b\n" - "ldr q7, [%[wbptr], #96]\n" - "mov v0.16b, v14.16b\n" - "ldr q6, [%[wbptr], #112]\n" - "mov v1.16b, v14.16b\n" - "ldr q5, [%[wbptr], #128]\n" - "mov v2.16b, v14.16b\n" - "ldr q4, [%[wbptr], #144]\n" - "mov v3.16b, v14.16b\n" - "ldr q29, [%[inptr0]]\n" - "fmla v17.4s, v29.4s, 
v12.4s\n" - "ldr q28, [x8]\n" - "ldr q30, [%[inptr0], %[input_col_stride1]]\n" - "ldr q25, [x9]\n" - "ldr q26, [x8, %[input_col_stride1]]\n" - "ldr q27, [%[inptr0], x15]\n" - "ldr q15, [x10]\n" - "ldr q18, [x9, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x8, #64]\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "prfm pldl1keep, [x9, #64]\n" - "prfm pldl1keep, [x8, x28]\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "prfm pldl1keep, [x10, #64]\n" - "prfm pldl1keep, [x9, x28]\n" - "beq 3f\n" - "2:\n" - "fmla v17.4s, v28.4s, v9.4s\n" - "prfm pldl1keep, [x8, x16]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr q22, [x8, x15]\n" - "fmla v24.4s, v30.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "ldr q29, [%[inptr0], x17]\n" - "fmla v23.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x11, #64]\n" - "fmla v20.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x10, x28]\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "ldr q25, [x11]\n" - "fmla v23.4s, v26.4s, v11.4s\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "prfm pldl1keep, [x8, x7]\n" - "fmla v17.4s, v26.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v16.4s, v26.4s, v12.4s\n" - "ldr q28, [x10, %[input_col_stride1]]\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "prfm pldl1keep, [x12, #64]\n" - "fmla v17.4s, v27.4s, v10.4s\n" - "prfm pldl1keep, [x11, x28]\n" - "fmla v13.4s, v27.4s, v12.4s\n" - "ldr q19, [x9, x15]\n" - "fmla v23.4s, v15.4s, v6.4s\n" - "prfm pldl1keep, [x10, x16]\n" - "fmla v20.4s, v15.4s, v9.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v0.4s, v15.4s, v12.4s\n" - "ldr q21, [x8, x17]\n" - "fmla v17.4s, v18.4s, v5.4s\n" - "prfm pldl1keep, [x8, x20]\n" - "fmla v23.4s, v18.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "fmla v24.4s, v18.4s, v6.4s\n" - "prfm pldl1keep, [x12, x28]\n" - "fmla v20.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x11, x16]\n" - "fmla v16.4s, v18.4s, v9.4s\n" - "prfm pldl1keep, [x10, x7]\n" - "fmla v1.4s, 
v18.4s, v12.4s\n" - "ldr q27, [%[inptr0], x19]\n" - "fmla v17.4s, v22.4s, v7.4s\n" - "prfm pldl1keep, [x9, x20]\n" - "fmla v23.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x8, x22]\n" - "fmla v24.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x12, x16]\n" - "fmla v16.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x11, x7]\n" - "fmla v13.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x20]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "ldr q18, [x12]\n" - "fmla v24.4s, v29.4s, v10.4s\n" - "prfm pldl1keep, [x9, x22]\n" - "fmla v13.4s, v29.4s, v11.4s\n" - "prfm pldl1keep, [x12, x7]\n" - "fmla v3.4s, v29.4s, v12.4s\n" - "ldr q22, [x11, %[input_col_stride1]]\n" - "fmla v20.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x11, x20]\n" - "fmla v0.4s, v25.4s, v9.4s\n" - "ldr q25, [x10, x15]\n" - "fmla v23.4s, v28.4s, v5.4s\n" - "prfm pldl1keep, [x10, x22]\n" - "fmla v20.4s, v28.4s, v8.4s\n" - "prfm pldl1keep, [x12, x20]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "prfm pldl1keep, [x11, x22]\n" - "fmla v0.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [x12, x22]\n" - "fmla v1.4s, v28.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v17.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v23.4s, v19.4s, v7.4s\n" - "subs x14, x14, #1\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v20.4s, v19.4s, v10.4s\n" - "str q17, [%[outptr0]]\n" - "mov v15.16b, v14.16b\n" - "fmla v16.4s, v19.4s, v8.4s\n" - "fmla v13.4s, v19.4s, v6.4s\n" - "fmla v15.4s, v28.4s, v12.4s\n" - "ldr q29, [x9, x17]\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "fmla v2.4s, v19.4s, v9.4s\n" - "fmla v24.4s, v21.4s, v7.4s\n" - "fmla v16.4s, v21.4s, v10.4s\n" - "fmla v13.4s, v21.4s, v8.4s\n" - "fmla v3.4s, v21.4s, v9.4s\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "mov v18.16b, v14.16b\n" - "fmla v20.4s, v22.4s, v5.4s\n" - "fmla v13.4s, v27.4s, v10.4s\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "mov v17.16b, v14.16b\n" - "fmla v18.4s, v19.4s, v12.4s\n" - "mov v19.16b, v14.16b\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "fmla v17.4s, 
v21.4s, v12.4s\n" - "ldr q26, [x8, x19]\n" - "fmla v1.4s, v22.4s, v6.4s\n" - "fmla v15.4s, v22.4s, v9.4s\n" - "mov v22.16b, v14.16b\n" - "mov v21.16b, v14.16b\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "fmla v20.4s, v25.4s, v7.4s\n" - "fmla v16.4s, v25.4s, v5.4s\n" - "fmla v0.4s, v25.4s, v10.4s\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "str q23, [x23]\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "ldr q28, [%[inptr0], x21]\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "ldr q30, [x12, %[input_col_stride1]]\n" - "fmla v24.4s, v29.4s, v4.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v16.4s, v29.4s, v7.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "str q24, [%[outptr0], %[output_col_stride1]]\n" - "fmla v1.4s, v29.4s, v10.4s\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "ldr q27, [x11, x15]\n" - "fmla v3.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "fmla v22.4s, v29.4s, v12.4s\n" - "ldr q23, [x10, x17]\n" - "fmla v13.4s, v26.4s, v7.4s\n" - "fmla v2.4s, v26.4s, v10.4s\n" - "fmla v3.4s, v26.4s, v8.4s\n" - "fmla v17.4s, v26.4s, v11.4s\n" - "fmla v0.4s, v30.4s, v5.4s\n" - "ldr q24, [x9, x19]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "ldr q29, [x8, x21]\n" - "fmla v3.4s, v28.4s, v10.4s\n" - "ldr q14, [x12, x15]\n" - "fmla v20.4s, v27.4s, v4.4s\n" - "add x8, x8, #16\n" - "fmla v0.4s, v27.4s, v7.4s\n" - "prfm pldl1keep, [x8, #64]\n" - "fmla v1.4s, v27.4s, v5.4s\n" - "prfm pldl1keep, [x8, x28]\n" - "str q20, [x24]\n" - "fmla v15.4s, v27.4s, v8.4s\n" - "fmla v18.4s, v27.4s, v6.4s\n" - "ldr q25, [x11, x17]\n" - "fmla v19.4s, v27.4s, v9.4s\n" - "ldr q30, [x10, x19]\n" - "fmla v16.4s, v23.4s, v4.4s\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "fmla v2.4s, v23.4s, v5.4s\n" - "fmla v15.4s, v23.4s, v10.4s\n" - "fmla v18.4s, v23.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v6.4s\n" - "str q16, [x23, 
%[output_col_stride1]]\n" - "fmla v19.4s, v23.4s, v11.4s\n" - "fmla v22.4s, v23.4s, v9.4s\n" - "ldr q26, [x9, x21]\n" - "fmla v21.4s, v23.4s, v12.4s\n" - "ldr q27, [x12, x17]\n" - "fmla v13.4s, v24.4s, v4.4s\n" - "ldr q20, [x11, x19]\n" - "fmla v2.4s, v24.4s, v7.4s\n" - "add x9, x9, #16\n" - "fmla v3.4s, v24.4s, v5.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "str q13, [%[outptr0], x26]\n" - "fmla v18.4s, v24.4s, v10.4s\n" - "fmla v17.4s, v24.4s, v8.4s\n" - "ldr q23, [x10, x21]\n" - "fmla v22.4s, v24.4s, v11.4s\n" - "ldr q24, [x12, x19]\n" - "fmla v3.4s, v29.4s, v7.4s\n" - "prfm pldl1keep, [x9, x28]\n" - "fmla v17.4s, v29.4s, v10.4s\n" - "ldr q16, [x11, x21]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "add x10, x10, #16\n" - "fmla v15.4s, v14.4s, v5.4s\n" - "prfm pldl1keep, [x10, #64]\n" - "fmla v19.4s, v14.4s, v6.4s\n" - "ldr q13, [x12, x21]\n" - "str q0, [x25]\n" - "fmla v1.4s, v25.4s, v4.4s\n" - "fmla v15.4s, v25.4s, v7.4s\n" - "ldr q14, [%[wbptr]]\n" - "fmla v18.4s, v25.4s, v5.4s\n" - "add x11, x11, #16\n" - "str q1, [x24, %[output_col_stride1]]\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "fmla v22.4s, v25.4s, v6.4s\n" - "ldr q12, [%[wbptr], #16]\n" - "fmla v21.4s, v25.4s, v9.4s\n" - "ldr q29, [%[inptr0]]\n" - "fmla v2.4s, v30.4s, v4.4s\n" - "ldr q28, [x8]\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "add x12, x12, #16\n" - "fmla v17.4s, v30.4s, v5.4s\n" - "fmla v19.4s, v30.4s, v10.4s\n" - "str q2, [x23, x26]\n" - "fmla v22.4s, v30.4s, v8.4s\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "ldr q9, [%[wbptr], #64]\n" - "fmla v3.4s, v26.4s, v4.4s\n" - "ldr q30, [%[inptr0], %[input_col_stride1]]\n" - "fmla v17.4s, v26.4s, v7.4s\n" - "ldr q25, [x9]\n" - "fmla v22.4s, v26.4s, v10.4s\n" - "ldr q11, [%[wbptr], #32]\n" - "str q3, [%[outptr0], x27]\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v19.4s, v27.4s, v5.4s\n" - "ldr q26, [x8, %[input_col_stride1]]\n" - "fmla v21.4s, v27.4s, v6.4s\n" - "ldr q27, [%[inptr0], x15]\n" - "str q15, [x25, %[output_col_stride1]]\n" - "fmla v18.4s, v20.4s, v4.4s\n" - 
"fmla v19.4s, v20.4s, v7.4s\n" - "ldr q15, [x10]\n" - "fmla v22.4s, v20.4s, v5.4s\n" - "ldr q6, [%[wbptr], #112]\n" - "str q18, [x24, x26]\n" - "fmla v21.4s, v20.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "ldr q18, [x9, %[input_col_stride1]]\n" - "fmla v22.4s, v23.4s, v7.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v21.4s, v23.4s, v10.4s\n" - "ldr q8, [%[wbptr], #80]\n" - "str q17, [x23, x27]\n" - "fmla v19.4s, v24.4s, v4.4s\n" - "fmla v22.4s, v16.4s, v4.4s\n" - "add x23, x23, #16\n" - "fmla v21.4s, v24.4s, v5.4s\n" - "ldr q10, [%[wbptr], #48]\n" - "str q19, [x25, x26]\n" - "mov v17.16b, v14.16b\n" - "str q22, [x24, x27]\n" - "mov v23.16b, v14.16b\n" - "fmla v21.4s, v16.4s, v7.4s\n" - "ldr q5, [%[wbptr], #128]\n" - "mov v24.16b, v14.16b\n" - "add x24, x24, #16\n" - "mov v20.16b, v14.16b\n" - "mov v16.16b, v14.16b\n" - "fmla v21.4s, v13.4s, v4.4s\n" - "ldr q7, [%[wbptr], #96]\n" - "mov v13.16b, v14.16b\n" - "mov v0.16b, v14.16b\n" - "mov v1.16b, v14.16b\n" - "mov v2.16b, v14.16b\n" - "str q21, [x25, x27]\n" - "mov v3.16b, v14.16b\n" - "ldr q4, [%[wbptr], #144]\n" - "add x25, x25, #16\n" - "fmla v17.4s, v29.4s, v12.4s\n" - "bne 2b\n" - "3:\n" - "fmla v17.4s, v28.4s, v9.4s\n" - "prfm pldl1keep, [x8, x16]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr q22, [x8, x15]\n" - "fmla v24.4s, v30.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "ldr q29, [%[inptr0], x17]\n" - "fmla v23.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x11, #64]\n" - "fmla v20.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x10, x28]\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "ldr q25, [x11]\n" - "fmla v23.4s, v26.4s, v11.4s\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "prfm pldl1keep, [x8, x7]\n" - "fmla v17.4s, v26.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v16.4s, v26.4s, v12.4s\n" - "ldr q28, [x10, %[input_col_stride1]]\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "prfm pldl1keep, [x12, #64]\n" - "fmla v17.4s, v27.4s, v10.4s\n" 
- "prfm pldl1keep, [x11, x28]\n" - "fmla v13.4s, v27.4s, v12.4s\n" - "ldr q19, [x9, x15]\n" - "fmla v23.4s, v15.4s, v6.4s\n" - "prfm pldl1keep, [x10, x16]\n" - "fmla v20.4s, v15.4s, v9.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v0.4s, v15.4s, v12.4s\n" - "ldr q21, [x8, x17]\n" - "fmla v17.4s, v18.4s, v5.4s\n" - "prfm pldl1keep, [x8, x20]\n" - "fmla v23.4s, v18.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "fmla v24.4s, v18.4s, v6.4s\n" - "prfm pldl1keep, [x12, x28]\n" - "fmla v20.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x11, x16]\n" - "fmla v16.4s, v18.4s, v9.4s\n" - "prfm pldl1keep, [x10, x7]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "ldr q27, [%[inptr0], x19]\n" - "fmla v17.4s, v22.4s, v7.4s\n" - "prfm pldl1keep, [x9, x20]\n" - "fmla v23.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x8, x22]\n" - "fmla v24.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x12, x16]\n" - "fmla v16.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x11, x7]\n" - "fmla v13.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x20]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "ldr q18, [x12]\n" - "fmla v24.4s, v29.4s, v10.4s\n" - "prfm pldl1keep, [x9, x22]\n" - "fmla v13.4s, v29.4s, v11.4s\n" - "prfm pldl1keep, [x12, x7]\n" - "fmla v3.4s, v29.4s, v12.4s\n" - "ldr q22, [x11, %[input_col_stride1]]\n" - "fmla v20.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x11, x20]\n" - "fmla v0.4s, v25.4s, v9.4s\n" - "ldr q25, [x10, x15]\n" - "fmla v23.4s, v28.4s, v5.4s\n" - "prfm pldl1keep, [x10, x22]\n" - "fmla v20.4s, v28.4s, v8.4s\n" - "prfm pldl1keep, [x12, x20]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "prfm pldl1keep, [x11, x22]\n" - "fmla v0.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [x12, x22]\n" - "fmla v1.4s, v28.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v17.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v23.4s, v19.4s, v7.4s\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v20.4s, v19.4s, v10.4s\n" - "fmla v16.4s, v19.4s, v8.4s\n" - "str q17, [%[outptr0]]\n" - "mov v15.16b, v14.16b\n" - "fmla 
v13.4s, v19.4s, v6.4s\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "fmla v15.4s, v28.4s, v12.4s\n" - "ldr q29, [x9, x17]\n" - "fmla v2.4s, v19.4s, v9.4s\n" - "fmla v24.4s, v21.4s, v7.4s\n" - "fmla v16.4s, v21.4s, v10.4s\n" - "fmla v13.4s, v21.4s, v8.4s\n" - "fmla v3.4s, v21.4s, v9.4s\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "mov v18.16b, v14.16b\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "fmla v13.4s, v27.4s, v10.4s\n" - "fmla v20.4s, v22.4s, v5.4s\n" - "fmla v18.4s, v19.4s, v12.4s\n" - "ldr q26, [x8, x19]\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "ldr q28, [%[inptr0], x21]\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v1.4s, v22.4s, v6.4s\n" - "fmla v15.4s, v22.4s, v9.4s\n" - "mov v17.16b, v14.16b\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "fmla v20.4s, v25.4s, v7.4s\n" - "fmla v16.4s, v25.4s, v5.4s\n" - "fmla v17.4s, v21.4s, v12.4s\n" - "ldr q30, [x12, %[input_col_stride1]]\n" - "str q23, [x23]\n" - "mov v19.16b, v14.16b\n" - "fmla v0.4s, v25.4s, v10.4s\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "mov v22.16b, v14.16b\n" - "mov v21.16b, v14.16b\n" - "fmla v24.4s, v29.4s, v4.4s\n" - "fmla v16.4s, v29.4s, v7.4s\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "fmla v1.4s, v29.4s, v10.4s\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "fmla v3.4s, v29.4s, v6.4s\n" - "str q24, [%[outptr0], %[output_col_stride1]]\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "ldr q27, [x11, x15]\n" - "fmla v22.4s, v29.4s, v12.4s\n" - "ldr q23, [x10, x17]\n" - "fmla v13.4s, v26.4s, v7.4s\n" - "fmla v2.4s, v26.4s, v10.4s\n" - "fmla v3.4s, v26.4s, v8.4s\n" - "fmla v17.4s, v26.4s, v11.4s\n" - "fmla v0.4s, v30.4s, v5.4s\n" - "ldr q24, [x9, x19]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "ldr q29, [x8, x21]\n" - "fmla v3.4s, v28.4s, v10.4s\n" - "ldr q14, [x12, x15]\n" - "fmla v20.4s, v27.4s, v4.4s\n" - "add x8, x8, #16\n" - "fmla v0.4s, v27.4s, v7.4s\n" - 
"fmla v1.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v27.4s, v8.4s\n" - "fmla v18.4s, v27.4s, v6.4s\n" - "str q20, [x24]\n" - "fmla v19.4s, v27.4s, v9.4s\n" - "fmla v16.4s, v23.4s, v4.4s\n" - "ldr q25, [x11, x17]\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "ldr q30, [x10, x19]\n" - "fmla v2.4s, v23.4s, v5.4s\n" - "fmla v15.4s, v23.4s, v10.4s\n" - "str q16, [x23, %[output_col_stride1]]\n" - "fmla v18.4s, v23.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v6.4s\n" - "ldr q26, [x9, x21]\n" - "fmla v19.4s, v23.4s, v11.4s\n" - "add x9, x9, #16\n" - "fmla v22.4s, v23.4s, v9.4s\n" - "fmla v21.4s, v23.4s, v12.4s\n" - "fmla v13.4s, v24.4s, v4.4s\n" - "ldr q27, [x12, x17]\n" - "fmla v2.4s, v24.4s, v7.4s\n" - "ldr q20, [x11, x19]\n" - "fmla v3.4s, v24.4s, v5.4s\n" - "fmla v18.4s, v24.4s, v10.4s\n" - "str q13, [%[outptr0], x26]\n" - "fmla v17.4s, v24.4s, v8.4s\n" - "fmla v22.4s, v24.4s, v11.4s\n" - "ldr q23, [x10, x21]\n" - "fmla v3.4s, v29.4s, v7.4s\n" - "ldr q24, [x12, x19]\n" - "fmla v17.4s, v29.4s, v10.4s\n" - "ldr q16, [x11, x21]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "add x10, x10, #16\n" - "fmla v15.4s, v14.4s, v5.4s\n" - "add x11, x11, #16\n" - "fmla v19.4s, v14.4s, v6.4s\n" - "ldr q13, [x12, x21]\n" - "str q0, [x25]\n" - "fmla v1.4s, v25.4s, v4.4s\n" - "fmla v15.4s, v25.4s, v7.4s\n" - "add x12, x12, #16\n" - "fmla v18.4s, v25.4s, v5.4s\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "str q1, [x24, %[output_col_stride1]]\n" - "fmla v22.4s, v25.4s, v6.4s\n" - "fmla v21.4s, v25.4s, v9.4s\n" - "fmla v2.4s, v30.4s, v4.4s\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "fmla v17.4s, v30.4s, v5.4s\n" - "fmla v19.4s, v30.4s, v10.4s\n" - "fmla v22.4s, v30.4s, v8.4s\n" - "str q2, [x23, x26]\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "fmla v3.4s, v26.4s, v4.4s\n" - "fmla v17.4s, v26.4s, v7.4s\n" - "fmla v22.4s, v26.4s, v10.4s\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v19.4s, v27.4s, v5.4s\n" - "fmla v21.4s, v27.4s, v6.4s\n" - "str q3, [%[outptr0], x27]\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "str q15, [x25, 
%[output_col_stride1]]\n" - "fmla v22.4s, v20.4s, v5.4s\n" - "fmla v19.4s, v20.4s, v7.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "str q18, [x24, x26]\n" - "fmla v21.4s, v20.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "fmla v22.4s, v23.4s, v7.4s\n" - "fmla v19.4s, v24.4s, v4.4s\n" - "fmla v21.4s, v23.4s, v10.4s\n" - "str q17, [x23, x27]\n" - "fmla v22.4s, v16.4s, v4.4s\n" - "str q19, [x25, x26]\n" - "add x23, x23, #16\n" - "fmla v21.4s, v24.4s, v5.4s\n" - "str q22, [x24, x27]\n" - "add x24, x24, #16\n" - "fmla v21.4s, v16.4s, v7.4s\n" - "fmla v21.4s, v13.4s, v4.4s\n" - "str q21, [x25, x27]\n" - "add x25, x25, #16\n" - "4:\n" - "cbz x13, 7f\n" - "ldr s14, [%[wbptr]]\n" - "mov v17.16b, v14.16b\n" - "ldr s12, [%[wbptr], #4]\n" - "mov v23.16b, v14.16b\n" - "ldr s11, [%[wbptr], #8]\n" - "mov v24.16b, v14.16b\n" - "ldr s10, [%[wbptr], #12]\n" - "mov v20.16b, v14.16b\n" - "ldr s9, [%[wbptr], #16]\n" - "mov v16.16b, v14.16b\n" - "ldr s8, [%[wbptr], #20]\n" - "mov v13.16b, v14.16b\n" - "ldr s7, [%[wbptr], #24]\n" - "mov v0.16b, v14.16b\n" - "ldr s6, [%[wbptr], #28]\n" - "mov v1.16b, v14.16b\n" - "ldr s5, [%[wbptr], #32]\n" - "mov v2.16b, v14.16b\n" - "ldr s4, [%[wbptr], #36]\n" - "mov v3.16b, v14.16b\n" - "ldr s29, [%[inptr0]]\n" - "fmla v17.4s, v29.4s, v12.4s\n" - "ldr s28, [x8]\n" - "ldr s30, [%[inptr0], %[input_col_stride1]]\n" - "subs x13, x13, #1\n" - "ldr s25, [x9]\n" - "ldr s26, [x8, %[input_col_stride1]]\n" - "ldr s27, [%[inptr0], x15]\n" - "ldr s15, [x10]\n" - "ldr s18, [x9, %[input_col_stride1]]\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x8, #64]\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "prfm pldl1keep, [x9, #64]\n" - "prfm pldl1keep, [x8, x28]\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "prfm pldl1keep, [x10, #64]\n" - "prfm pldl1keep, [x9, x28]\n" - "beq 6f\n" - "5:\n" - "fmla v17.4s, v28.4s, v9.4s\n" - "prfm pldl1keep, [x8, x16]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr s22, [x8, x15]\n" - "fmla v24.4s, v30.4s, v12.4s\n" - "prfm 
pldl1keep, [%[inptr0], x7]\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "ldr s29, [%[inptr0], x17]\n" - "fmla v23.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x11, #64]\n" - "fmla v20.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x10, x28]\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "ldr s25, [x11]\n" - "fmla v23.4s, v26.4s, v11.4s\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "prfm pldl1keep, [x8, x7]\n" - "fmla v17.4s, v26.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v16.4s, v26.4s, v12.4s\n" - "ldr s28, [x10, %[input_col_stride1]]\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "prfm pldl1keep, [x12, #64]\n" - "fmla v17.4s, v27.4s, v10.4s\n" - "prfm pldl1keep, [x11, x28]\n" - "fmla v13.4s, v27.4s, v12.4s\n" - "ldr s19, [x9, x15]\n" - "fmla v23.4s, v15.4s, v6.4s\n" - "prfm pldl1keep, [x10, x16]\n" - "fmla v20.4s, v15.4s, v9.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v0.4s, v15.4s, v12.4s\n" - "ldr s21, [x8, x17]\n" - "fmla v17.4s, v18.4s, v5.4s\n" - "prfm pldl1keep, [x8, x20]\n" - "fmla v23.4s, v18.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "fmla v24.4s, v18.4s, v6.4s\n" - "prfm pldl1keep, [x12, x28]\n" - "fmla v20.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x11, x16]\n" - "fmla v16.4s, v18.4s, v9.4s\n" - "prfm pldl1keep, [x10, x7]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "ldr s27, [%[inptr0], x19]\n" - "fmla v17.4s, v22.4s, v7.4s\n" - "prfm pldl1keep, [x9, x20]\n" - "fmla v23.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x8, x22]\n" - "fmla v24.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x12, x16]\n" - "fmla v16.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x11, x7]\n" - "fmla v13.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x20]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "ldr s18, [x12]\n" - "fmla v24.4s, v29.4s, v10.4s\n" - "prfm pldl1keep, [x9, x22]\n" - "fmla v13.4s, v29.4s, v11.4s\n" - "prfm pldl1keep, [x12, x7]\n" - "fmla v3.4s, v29.4s, v12.4s\n" - "ldr s22, [x11, %[input_col_stride1]]\n" - "fmla v20.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x11, x20]\n" 
- "fmla v0.4s, v25.4s, v9.4s\n" - "ldr s25, [x10, x15]\n" - "fmla v23.4s, v28.4s, v5.4s\n" - "prfm pldl1keep, [x10, x22]\n" - "fmla v20.4s, v28.4s, v8.4s\n" - "prfm pldl1keep, [x12, x20]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "prfm pldl1keep, [x11, x22]\n" - "fmla v0.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [x12, x22]\n" - "fmla v1.4s, v28.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v17.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v23.4s, v19.4s, v7.4s\n" - "subs x13, x13, #1\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v20.4s, v19.4s, v10.4s\n" - "str s17, [%[outptr0]]\n" - "mov v15.16b, v14.16b\n" - "fmla v16.4s, v19.4s, v8.4s\n" - "fmla v13.4s, v19.4s, v6.4s\n" - "fmla v15.4s, v28.4s, v12.4s\n" - "ldr s29, [x9, x17]\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "fmla v2.4s, v19.4s, v9.4s\n" - "fmla v24.4s, v21.4s, v7.4s\n" - "fmla v16.4s, v21.4s, v10.4s\n" - "fmla v13.4s, v21.4s, v8.4s\n" - "fmla v3.4s, v21.4s, v9.4s\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "mov v18.16b, v14.16b\n" - "fmla v20.4s, v22.4s, v5.4s\n" - "fmla v13.4s, v27.4s, v10.4s\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "mov v17.16b, v14.16b\n" - "fmla v18.4s, v19.4s, v12.4s\n" - "mov v19.16b, v14.16b\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "fmla v17.4s, v21.4s, v12.4s\n" - "ldr s26, [x8, x19]\n" - "fmla v1.4s, v22.4s, v6.4s\n" - "fmla v15.4s, v22.4s, v9.4s\n" - "mov v22.16b, v14.16b\n" - "mov v21.16b, v14.16b\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "fmla v20.4s, v25.4s, v7.4s\n" - "fmla v16.4s, v25.4s, v5.4s\n" - "fmla v0.4s, v25.4s, v10.4s\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "str s23, [x23]\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "ldr s28, [%[inptr0], x21]\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "ldr s30, [x12, %[input_col_stride1]]\n" - "fmla v24.4s, v29.4s, v4.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v16.4s, v29.4s, v7.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v13.4s, 
v29.4s, v5.4s\n" - "prfm pldl1keep, [%[inptr0], x28]\n" - "str s24, [%[outptr0], %[output_col_stride1]]\n" - "fmla v1.4s, v29.4s, v10.4s\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "ldr s27, [x11, x15]\n" - "fmla v3.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "fmla v22.4s, v29.4s, v12.4s\n" - "ldr s23, [x10, x17]\n" - "fmla v13.4s, v26.4s, v7.4s\n" - "fmla v2.4s, v26.4s, v10.4s\n" - "fmla v3.4s, v26.4s, v8.4s\n" - "fmla v17.4s, v26.4s, v11.4s\n" - "fmla v0.4s, v30.4s, v5.4s\n" - "ldr s24, [x9, x19]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "ldr s29, [x8, x21]\n" - "fmla v3.4s, v28.4s, v10.4s\n" - "ldr s14, [x12, x15]\n" - "fmla v20.4s, v27.4s, v4.4s\n" - "add x8, x8, #4\n" - "fmla v0.4s, v27.4s, v7.4s\n" - "prfm pldl1keep, [x8, #64]\n" - "fmla v1.4s, v27.4s, v5.4s\n" - "prfm pldl1keep, [x8, x28]\n" - "str s20, [x24]\n" - "fmla v15.4s, v27.4s, v8.4s\n" - "fmla v18.4s, v27.4s, v6.4s\n" - "ldr s25, [x11, x17]\n" - "fmla v19.4s, v27.4s, v9.4s\n" - "ldr s30, [x10, x19]\n" - "fmla v16.4s, v23.4s, v4.4s\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "fmla v2.4s, v23.4s, v5.4s\n" - "fmla v15.4s, v23.4s, v10.4s\n" - "fmla v18.4s, v23.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v6.4s\n" - "str s16, [x23, %[output_col_stride1]]\n" - "fmla v19.4s, v23.4s, v11.4s\n" - "fmla v22.4s, v23.4s, v9.4s\n" - "ldr s26, [x9, x21]\n" - "fmla v21.4s, v23.4s, v12.4s\n" - "ldr s27, [x12, x17]\n" - "fmla v13.4s, v24.4s, v4.4s\n" - "ldr s20, [x11, x19]\n" - "fmla v2.4s, v24.4s, v7.4s\n" - "add x9, x9, #4\n" - "fmla v3.4s, v24.4s, v5.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "str s13, [%[outptr0], x26]\n" - "fmla v18.4s, v24.4s, v10.4s\n" - "fmla v17.4s, v24.4s, v8.4s\n" - "ldr s23, [x10, x21]\n" - "fmla v22.4s, v24.4s, v11.4s\n" - "ldr s24, [x12, x19]\n" - "fmla v3.4s, v29.4s, v7.4s\n" - "prfm pldl1keep, [x9, x28]\n" - "fmla v17.4s, v29.4s, v10.4s\n" - "ldr s16, [x11, x21]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "add x10, x10, #4\n" - "fmla 
v15.4s, v14.4s, v5.4s\n" - "prfm pldl1keep, [x10, #64]\n" - "fmla v19.4s, v14.4s, v6.4s\n" - "ldr s13, [x12, x21]\n" - "str s0, [x25]\n" - "fmla v1.4s, v25.4s, v4.4s\n" - "fmla v15.4s, v25.4s, v7.4s\n" - "ldr s14, [%[wbptr]]\n" - "fmla v18.4s, v25.4s, v5.4s\n" - "add x11, x11, #4\n" - "str s1, [x24, %[output_col_stride1]]\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "fmla v22.4s, v25.4s, v6.4s\n" - "ldr s12, [%[wbptr], #4]\n" - "fmla v21.4s, v25.4s, v9.4s\n" - "ldr s29, [%[inptr0]]\n" - "fmla v2.4s, v30.4s, v4.4s\n" - "ldr s28, [x8]\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "add x12, x12, #4\n" - "fmla v17.4s, v30.4s, v5.4s\n" - "fmla v19.4s, v30.4s, v10.4s\n" - "str s2, [x23, x26]\n" - "fmla v22.4s, v30.4s, v8.4s\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "ldr s9, [%[wbptr], #16]\n" - "fmla v3.4s, v26.4s, v4.4s\n" - "ldr s30, [%[inptr0], %[input_col_stride1]]\n" - "fmla v17.4s, v26.4s, v7.4s\n" - "ldr s25, [x9]\n" - "fmla v22.4s, v26.4s, v10.4s\n" - "ldr s11, [%[wbptr], #8]\n" - "str s3, [%[outptr0], x27]\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v19.4s, v27.4s, v5.4s\n" - "ldr s26, [x8, %[input_col_stride1]]\n" - "fmla v21.4s, v27.4s, v6.4s\n" - "ldr s27, [%[inptr0], x15]\n" - "str s15, [x25, %[output_col_stride1]]\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "fmla v19.4s, v20.4s, v7.4s\n" - "ldr s15, [x10]\n" - "fmla v22.4s, v20.4s, v5.4s\n" - "ldr s6, [%[wbptr], #28]\n" - "str s18, [x24, x26]\n" - "fmla v21.4s, v20.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "ldr s18, [x9, %[input_col_stride1]]\n" - "fmla v22.4s, v23.4s, v7.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmla v21.4s, v23.4s, v10.4s\n" - "ldr s8, [%[wbptr], #20]\n" - "str s17, [x23, x27]\n" - "fmla v19.4s, v24.4s, v4.4s\n" - "fmla v22.4s, v16.4s, v4.4s\n" - "add x23, x23, #4\n" - "fmla v21.4s, v24.4s, v5.4s\n" - "ldr s10, [%[wbptr], #12]\n" - "str s19, [x25, x26]\n" - "mov v17.16b, v14.16b\n" - "str s22, [x24, x27]\n" - "mov v23.16b, v14.16b\n" - "fmla v21.4s, v16.4s, v7.4s\n" - "ldr s5, [%[wbptr], #32]\n" - "mov 
v24.16b, v14.16b\n" - "add x24, x24, #4\n" - "mov v20.16b, v14.16b\n" - "mov v16.16b, v14.16b\n" - "fmla v21.4s, v13.4s, v4.4s\n" - "ldr s7, [%[wbptr], #24]\n" - "mov v13.16b, v14.16b\n" - "mov v0.16b, v14.16b\n" - "mov v1.16b, v14.16b\n" - "mov v2.16b, v14.16b\n" - "str s21, [x25, x27]\n" - "mov v3.16b, v14.16b\n" - "ldr s4, [%[wbptr], #36]\n" - "add x25, x25, #4\n" - "fmla v17.4s, v29.4s, v12.4s\n" - "bne 5b\n" - "6:\n" - "fmla v17.4s, v28.4s, v9.4s\n" - "prfm pldl1keep, [x8, x16]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr s22, [x8, x15]\n" - "fmla v24.4s, v30.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "ldr s29, [%[inptr0], x17]\n" - "fmla v23.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x11, #64]\n" - "fmla v20.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x10, x28]\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "ldr s25, [x11]\n" - "fmla v23.4s, v26.4s, v11.4s\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "prfm pldl1keep, [x8, x7]\n" - "fmla v17.4s, v26.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x20]\n" - "fmla v16.4s, v26.4s, v12.4s\n" - "ldr s28, [x10, %[input_col_stride1]]\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "prfm pldl1keep, [x12, #64]\n" - "fmla v17.4s, v27.4s, v10.4s\n" - "prfm pldl1keep, [x11, x28]\n" - "fmla v13.4s, v27.4s, v12.4s\n" - "ldr s19, [x9, x15]\n" - "fmla v23.4s, v15.4s, v6.4s\n" - "prfm pldl1keep, [x10, x16]\n" - "fmla v20.4s, v15.4s, v9.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v0.4s, v15.4s, v12.4s\n" - "ldr s21, [x8, x17]\n" - "fmla v17.4s, v18.4s, v5.4s\n" - "prfm pldl1keep, [x8, x20]\n" - "fmla v23.4s, v18.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x22]\n" - "fmla v24.4s, v18.4s, v6.4s\n" - "prfm pldl1keep, [x12, x28]\n" - "fmla v20.4s, v18.4s, v11.4s\n" - "prfm pldl1keep, [x11, x16]\n" - "fmla v16.4s, v18.4s, v9.4s\n" - "prfm pldl1keep, [x10, x7]\n" - "fmla v1.4s, v18.4s, v12.4s\n" - "ldr s27, [%[inptr0], x19]\n" - "fmla v17.4s, v22.4s, v7.4s\n" - "prfm pldl1keep, [x9, x20]\n" 
- "fmla v23.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x8, x22]\n" - "fmla v24.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x12, x16]\n" - "fmla v16.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x11, x7]\n" - "fmla v13.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x20]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "ldr s18, [x12]\n" - "fmla v24.4s, v29.4s, v10.4s\n" - "prfm pldl1keep, [x9, x22]\n" - "fmla v13.4s, v29.4s, v11.4s\n" - "prfm pldl1keep, [x12, x7]\n" - "fmla v3.4s, v29.4s, v12.4s\n" - "ldr s22, [x11, %[input_col_stride1]]\n" - "fmla v20.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x11, x20]\n" - "fmla v0.4s, v25.4s, v9.4s\n" - "ldr s25, [x10, x15]\n" - "fmla v23.4s, v28.4s, v5.4s\n" - "prfm pldl1keep, [x10, x22]\n" - "fmla v20.4s, v28.4s, v8.4s\n" - "prfm pldl1keep, [x12, x20]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "prfm pldl1keep, [x11, x22]\n" - "fmla v0.4s, v28.4s, v11.4s\n" - "prfm pldl1keep, [x12, x22]\n" - "fmla v1.4s, v28.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v17.4s, v19.4s, v4.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v23.4s, v19.4s, v7.4s\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v20.4s, v19.4s, v10.4s\n" - "fmla v16.4s, v19.4s, v8.4s\n" - "str s17, [%[outptr0]]\n" - "mov v15.16b, v14.16b\n" - "fmla v13.4s, v19.4s, v6.4s\n" - "fmla v1.4s, v19.4s, v11.4s\n" - "fmla v15.4s, v28.4s, v12.4s\n" - "ldr s29, [x9, x17]\n" - "fmla v2.4s, v19.4s, v9.4s\n" - "fmla v24.4s, v21.4s, v7.4s\n" - "fmla v16.4s, v21.4s, v10.4s\n" - "fmla v13.4s, v21.4s, v8.4s\n" - "fmla v3.4s, v21.4s, v9.4s\n" - "fmla v0.4s, v18.4s, v6.4s\n" - "mov v18.16b, v14.16b\n" - "fmla v2.4s, v21.4s, v11.4s\n" - "fmla v13.4s, v27.4s, v10.4s\n" - "fmla v20.4s, v22.4s, v5.4s\n" - "fmla v18.4s, v19.4s, v12.4s\n" - "ldr s26, [x8, x19]\n" - "fmla v3.4s, v27.4s, v11.4s\n" - "ldr s28, [%[inptr0], x21]\n" - "fmla v0.4s, v22.4s, v8.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v1.4s, v22.4s, v6.4s\n" - "fmla v15.4s, v22.4s, v9.4s\n" - "mov v17.16b, v14.16b\n" - "fmla v23.4s, v25.4s, 
v4.4s\n" - "fmla v20.4s, v25.4s, v7.4s\n" - "fmla v16.4s, v25.4s, v5.4s\n" - "fmla v17.4s, v21.4s, v12.4s\n" - "ldr s30, [x12, %[input_col_stride1]]\n" - "str s23, [x23]\n" - "mov v19.16b, v14.16b\n" - "fmla v0.4s, v25.4s, v10.4s\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "fmla v2.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "mov v22.16b, v14.16b\n" - "mov v21.16b, v14.16b\n" - "fmla v24.4s, v29.4s, v4.4s\n" - "fmla v16.4s, v29.4s, v7.4s\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "fmla v1.4s, v29.4s, v10.4s\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "fmla v3.4s, v29.4s, v6.4s\n" - "str s24, [%[outptr0], %[output_col_stride1]]\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "ldr s27, [x11, x15]\n" - "fmla v22.4s, v29.4s, v12.4s\n" - "ldr s23, [x10, x17]\n" - "fmla v13.4s, v26.4s, v7.4s\n" - "fmla v2.4s, v26.4s, v10.4s\n" - "fmla v3.4s, v26.4s, v8.4s\n" - "fmla v17.4s, v26.4s, v11.4s\n" - "fmla v0.4s, v30.4s, v5.4s\n" - "ldr s24, [x9, x19]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "ldr s29, [x8, x21]\n" - "fmla v3.4s, v28.4s, v10.4s\n" - "ldr s14, [x12, x15]\n" - "fmla v20.4s, v27.4s, v4.4s\n" - "add x8, x8, #4\n" - "fmla v0.4s, v27.4s, v7.4s\n" - "fmla v1.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v27.4s, v8.4s\n" - "fmla v18.4s, v27.4s, v6.4s\n" - "str s20, [x24]\n" - "fmla v19.4s, v27.4s, v9.4s\n" - "fmla v16.4s, v23.4s, v4.4s\n" - "ldr s25, [x11, x17]\n" - "fmla v1.4s, v23.4s, v7.4s\n" - "ldr s30, [x10, x19]\n" - "fmla v2.4s, v23.4s, v5.4s\n" - "fmla v15.4s, v23.4s, v10.4s\n" - "str s16, [x23, %[output_col_stride1]]\n" - "fmla v18.4s, v23.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v6.4s\n" - "ldr s26, [x9, x21]\n" - "fmla v19.4s, v23.4s, v11.4s\n" - "add x9, x9, #4\n" - "fmla v22.4s, v23.4s, v9.4s\n" - "fmla v21.4s, v23.4s, v12.4s\n" - "fmla v13.4s, v24.4s, v4.4s\n" - "ldr s27, [x12, x17]\n" - "fmla v2.4s, v24.4s, v7.4s\n" - "ldr s20, [x11, x19]\n" - "fmla v3.4s, v24.4s, v5.4s\n" - "fmla 
v18.4s, v24.4s, v10.4s\n" - "str s13, [%[outptr0], x26]\n" - "fmla v17.4s, v24.4s, v8.4s\n" - "fmla v22.4s, v24.4s, v11.4s\n" - "ldr s23, [x10, x21]\n" - "fmla v3.4s, v29.4s, v7.4s\n" - "ldr s24, [x12, x19]\n" - "fmla v17.4s, v29.4s, v10.4s\n" - "ldr s16, [x11, x21]\n" - "fmla v0.4s, v14.4s, v4.4s\n" - "add x10, x10, #4\n" - "fmla v15.4s, v14.4s, v5.4s\n" - "add x11, x11, #4\n" - "fmla v19.4s, v14.4s, v6.4s\n" - "ldr s13, [x12, x21]\n" - "str s0, [x25]\n" - "fmla v1.4s, v25.4s, v4.4s\n" - "fmla v15.4s, v25.4s, v7.4s\n" - "add x12, x12, #4\n" - "fmla v18.4s, v25.4s, v5.4s\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "str s1, [x24, %[output_col_stride1]]\n" - "fmla v22.4s, v25.4s, v6.4s\n" - "fmla v21.4s, v25.4s, v9.4s\n" - "fmla v2.4s, v30.4s, v4.4s\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "fmla v17.4s, v30.4s, v5.4s\n" - "fmla v19.4s, v30.4s, v10.4s\n" - "fmla v22.4s, v30.4s, v8.4s\n" - "str s2, [x23, x26]\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "fmla v3.4s, v26.4s, v4.4s\n" - "fmla v17.4s, v26.4s, v7.4s\n" - "fmla v22.4s, v26.4s, v10.4s\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v19.4s, v27.4s, v5.4s\n" - "fmla v21.4s, v27.4s, v6.4s\n" - "str s3, [%[outptr0], x27]\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "str s15, [x25, %[output_col_stride1]]\n" - "fmla v22.4s, v20.4s, v5.4s\n" - "fmla v19.4s, v20.4s, v7.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "str s18, [x24, x26]\n" - "fmla v21.4s, v20.4s, v8.4s\n" - "fmla v17.4s, v23.4s, v4.4s\n" - "fmla v22.4s, v23.4s, v7.4s\n" - "fmla v19.4s, v24.4s, v4.4s\n" - "fmla v21.4s, v23.4s, v10.4s\n" - "str s17, [x23, x27]\n" - "fmla v22.4s, v16.4s, v4.4s\n" - "str s19, [x25, x26]\n" - "add x23, x23, #4\n" - "fmla v21.4s, v24.4s, v5.4s\n" - "str s22, [x24, x27]\n" - "add x24, x24, #4\n" - "fmla v21.4s, v16.4s, v7.4s\n" - "fmla v21.4s, v13.4s, v4.4s\n" - "str s21, [x25, x27]\n" - "add x25, x25, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input) - : [output_row_stride] "r" (output_row_stride * 
sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x7", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x8", "x9", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::None>( - int n_channels, - const void *weight_bias_ptr, - const float *inptrs[6][6], - float *outptrs[4][4] -) -{ - __asm __volatile( - "mov x27, xzr\n" - "mov x28, xzr\n" - "and x15, %[n_channels], #3\n" - "lsr x16, %[n_channels], #2\n" - "cbz x16, 4f\n" - "1:\n" - "ldr q13, [%[wbptr]]\n" - "ldr x17, [%[inptrs], 0]\n" - "mov v18.16b, v13.16b\n" - "ldr q12, [%[wbptr], #16]\n" - "mov v22.16b, v13.16b\n" - "ldr q11, [%[wbptr], #32]\n" - "mov v23.16b, v13.16b\n" - "ldr q10, [%[wbptr], #48]\n" - "mov v19.16b, v13.16b\n" - "ldr q9, [%[wbptr], #64]\n" - "mov v17.16b, v13.16b\n" - "ldr q8, [%[wbptr], #80]\n" - "mov v14.16b, v13.16b\n" - "ldr q7, [%[wbptr], #96]\n" - "mov v0.16b, v13.16b\n" - "ldr q6, [%[wbptr], #112]\n" - "mov v1.16b, v13.16b\n" - "ldr q5, [%[wbptr], #128]\n" - "mov v2.16b, v13.16b\n" - "ldr q4, [%[wbptr], #144]\n" - "ldr q29, [x17, x27]\n" - "ldr x7, [%[inptrs], 48]\n" - "fmla v18.4s, v29.4s, v12.4s\n" - "ldr x17, [%[inptrs], 8]\n" - "ldr q27, [x7, x27]\n" - "ldr x19, [%[inptrs], 96]\n" - "ldr q28, [x17, x27]\n" - "ldr x7, [%[inptrs], 56]\n" - "ldr q25, [x19, x27]\n" - "ldr x17, [%[inptrs], 16]\n" - "ldr q16, [x7, x27]\n" - "ldr x20, [%[inptrs], 144]\n" - "ldr q15, [x17, x27]\n" - "ldr x19, [%[inptrs], 104]\n" - "ldr q21, [x20, x27]\n" - "subs x16, x16, #1\n" - "ldr q29, [x19, 
x27]\n" - "beq 3f\n" - "2:\n" - "mov v3.16b, v13.16b\n" - "ldr x7, [%[inptrs], 64]\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "ldr x17, [%[inptrs], 24]\n" - "fmla v22.4s, v27.4s, v12.4s\n" - "ldr q30, [x7, x27]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr x21, [%[inptrs], 192]\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "ldr x20, [%[inptrs], 152]\n" - "fmla v18.4s, v28.4s, v11.4s\n" - "ldr q24, [x17, x27]\n" - "fmla v22.4s, v25.4s, v9.4s\n" - "ldr x19, [%[inptrs], 112]\n" - "fmla v23.4s, v16.4s, v9.4s\n" - "ldr x7, [%[inptrs], 72]\n" - "fmla v17.4s, v16.4s, v12.4s\n" - "ldr x17, [%[inptrs], 32]\n" - "fmla v18.4s, v25.4s, v6.4s\n" - "ldr q31, [x21, x27]\n" - "fmla v22.4s, v16.4s, v11.4s\n" - "ldr x22, [%[inptrs], 240]\n" - "fmla v23.4s, v15.4s, v11.4s\n" - "ldr x21, [%[inptrs], 200]\n" - "fmla v14.4s, v15.4s, v12.4s\n" - "ldr x23, [%[outptrs], 0]\n" - "fmla v18.4s, v16.4s, v8.4s\n" - "ldr q25, [x20, x27]\n" - "fmla v22.4s, v21.4s, v6.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v19.4s, v21.4s, v9.4s\n" - "ldr x24, [%[outptrs], 32]\n" - "fmla v0.4s, v21.4s, v12.4s\n" - "ldr q21, [x19, x27]\n" - "fmla v18.4s, v15.4s, v10.4s\n" - "ldr q20, [x7, x27]\n" - "fmla v22.4s, v29.4s, v8.4s\n" - "ldr x19, [%[inptrs], 120]\n" - "fmla v23.4s, v29.4s, v6.4s\n" - "ldr x7, [%[inptrs], 80]\n" - "fmla v19.4s, v29.4s, v11.4s\n" - "ldr x25, [%[outptrs], 64]\n" - "fmla v18.4s, v29.4s, v5.4s\n" - "ldr x26, [%[outptrs], 96]\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "ldr q26, [x17, x27]\n" - "fmla v22.4s, v30.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "ldr x17, [%[inptrs], 40]\n" - "fmla v23.4s, v30.4s, v8.4s\n" - "subs x16, x16, #1\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "fmla v14.4s, v30.4s, v9.4s\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "ldr q27, [x22, x27]\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "ldr x22, [%[inptrs], 248]\n" - "fmla v23.4s, v24.4s, v10.4s\n" - "fmla v19.4s, v31.4s, v6.4s\n" - 
"fmla v14.4s, v24.4s, v11.4s\n" - "ldr q30, [x21, x27]\n" - "fmla v0.4s, v31.4s, v9.4s\n" - "ldr q24, [x20, x27]\n" - "fmla v22.4s, v25.4s, v5.4s\n" - "ldr x21, [%[inptrs], 208]\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "fmla v1.4s, v25.4s, v9.4s\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v21.4s, v4.4s\n" - "fmla v22.4s, v21.4s, v7.4s\n" - "fmla v23.4s, v21.4s, v5.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v14.4s, v21.4s, v6.4s\n" - "fmla v17.4s, v21.4s, v8.4s\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "str q18, [x23, x28]\n" - "mov v16.16b, v13.16b\n" - "fmla v2.4s, v21.4s, v9.4s\n" - "ldr x23, [%[outptrs], 8]\n" - "fmla v23.4s, v20.4s, v7.4s\n" - "fmla v14.4s, v20.4s, v8.4s\n" - "fmla v16.4s, v25.4s, v12.4s\n" - "ldr q25, [x19, x27]\n" - "fmla v17.4s, v20.4s, v10.4s\n" - "ldr x19, [%[inptrs], 128]\n" - "fmla v2.4s, v20.4s, v11.4s\n" - "fmla v3.4s, v20.4s, v9.4s\n" - "fmla v14.4s, v26.4s, v10.4s\n" - "fmla v0.4s, v27.4s, v6.4s\n" - "mov v15.16b, v13.16b\n" - "fmla v19.4s, v30.4s, v5.4s\n" - "fmla v1.4s, v30.4s, v6.4s\n" - "fmla v16.4s, v30.4s, v9.4s\n" - "fmla v3.4s, v26.4s, v11.4s\n" - "ldr q29, [x7, x27]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "ldr q27, [x17, x27]\n" - "fmla v0.4s, v30.4s, v8.4s\n" - "ldr q28, [x22, x27]\n" - "fmla v22.4s, v24.4s, v4.4s\n" - "ldr x7, [%[inptrs], 88]\n" - "fmla v19.4s, v24.4s, v7.4s\n" - "ldr x22, [%[inptrs], 256]\n" - "fmla v17.4s, v24.4s, v5.4s\n" - "ldr x17, [%[inptrs], 0]\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "fmla v1.4s, v24.4s, v8.4s\n" - "str q22, [x24, x28]\n" - "mov v18.16b, v13.16b\n" - "fmla v2.4s, v24.4s, v6.4s\n" - "ldr x24, [%[outptrs], 40]\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "fmla v18.4s, v20.4s, v12.4s\n" - "ldr q22, [x21, x27]\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "ldr x21, [%[inptrs], 216]\n" - "fmla v17.4s, v25.4s, v7.4s\n" - "fmla v14.4s, v25.4s, v5.4s\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "fmla v2.4s, 
v25.4s, v8.4s\n" - "fmla v3.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "str q23, [x23, x28]\n" - "mov v21.16b, v13.16b\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "ldr x23, [%[outptrs], 16]\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "fmla v2.4s, v29.4s, v10.4s\n" - "fmla v21.4s, v24.4s, v12.4s\n" - "ldr q30, [x20, x27]\n" - "fmla v3.4s, v29.4s, v8.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "ldr q31, [x19, x27]\n" - "fmla v0.4s, v28.4s, v5.4s\n" - "ldr x19, [%[inptrs], 136]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "ldr q26, [x7, x27]\n" - "fmla v3.4s, v27.4s, v10.4s\n" - "ldr q23, [x22, x27]\n" - "fmla v19.4s, v22.4s, v4.4s\n" - "ldr x22, [%[inptrs], 264]\n" - "fmla v0.4s, v22.4s, v7.4s\n" - "ldr x7, [%[inptrs], 48]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "fmla v16.4s, v22.4s, v8.4s\n" - "fmla v15.4s, v22.4s, v6.4s\n" - "fmla v21.4s, v22.4s, v9.4s\n" - "str q19, [x25, x28]\n" - "mov v24.16b, v13.16b\n" - "mov v20.16b, v13.16b\n" - "ldr q27, [x21, x27]\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "ldr x21, [%[inptrs], 224]\n" - "fmla v24.4s, v25.4s, v12.4s\n" - "ldr q28, [x20, x27]\n" - "fmla v1.4s, v30.4s, v7.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v2.4s, v30.4s, v5.4s\n" - "ldr x25, [%[outptrs], 72]\n" - "str q17, [x24, x28]\n" - "fmla v16.4s, v30.4s, v10.4s\n" - "fmla v15.4s, v30.4s, v8.4s\n" - "ldr q22, [x19, x27]\n" - "fmla v18.4s, v30.4s, v6.4s\n" - "ldr x24, [%[outptrs], 48]\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "ldr x19, [%[inptrs], 96]\n" - "fmla v24.4s, v30.4s, v9.4s\n" - "fmla v20.4s, v30.4s, v12.4s\n" - "fmla v14.4s, v31.4s, v4.4s\n" - "ldr q30, [x22, x27]\n" - "fmla v2.4s, v31.4s, v7.4s\n" - "ldr q19, [x21, x27]\n" - "fmla v3.4s, v31.4s, v5.4s\n" - "ldr x22, [%[inptrs], 272]\n" - "fmla v15.4s, v31.4s, v10.4s\n" - "ldr x21, [%[inptrs], 232]\n" - "str q14, [x23, x28]\n" - "fmla v18.4s, v31.4s, v8.4s\n" - "fmla v24.4s, v31.4s, v11.4s\n" - "ldr q31, [x20, x27]\n" - "fmla v3.4s, v26.4s, v7.4s\n" - "ldr q17, [x22, x27]\n" - "fmla 
v0.4s, v23.4s, v4.4s\n" - "ldr x22, [%[inptrs], 280]\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "ldr q14, [x21, x27]\n" - "fmla v16.4s, v23.4s, v5.4s\n" - "ldr x23, [%[outptrs], 24]\n" - "fmla v21.4s, v23.4s, v6.4s\n" - "ldr q26, [x22, x27]\n" - "str q0, [x26, x28]\n" - "fmla v1.4s, v27.4s, v4.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "ldr q13, [%[wbptr]]\n" - "fmla v16.4s, v27.4s, v7.4s\n" - "ldr x26, [%[outptrs], 104]\n" - "fmla v21.4s, v27.4s, v8.4s\n" - "add x27, x27, #16\n" - "str q1, [x25, x28]\n" - "fmla v24.4s, v27.4s, v6.4s\n" - "fmla v20.4s, v27.4s, v9.4s\n" - "ldr q12, [%[wbptr], #16]\n" - "fmla v2.4s, v28.4s, v4.4s\n" - "ldr q29, [x17, x27]\n" - "fmla v15.4s, v28.4s, v7.4s\n" - "ldr q27, [x7, x27]\n" - "fmla v18.4s, v28.4s, v5.4s\n" - "ldr x25, [%[outptrs], 80]\n" - "fmla v21.4s, v28.4s, v10.4s\n" - "ldr x17, [%[inptrs], 8]\n" - "str q2, [x24, x28]\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "fmla v20.4s, v28.4s, v11.4s\n" - "ldr q9, [%[wbptr], #64]\n" - "fmla v3.4s, v22.4s, v4.4s\n" - "ldr q28, [x17, x27]\n" - "fmla v18.4s, v22.4s, v7.4s\n" - "ldr q25, [x19, x27]\n" - "fmla v24.4s, v22.4s, v10.4s\n" - "ldr x24, [%[outptrs], 56]\n" - "fmla v16.4s, v30.4s, v4.4s\n" - "ldr q11, [%[wbptr], #32]\n" - "str q3, [x23, x28]\n" - "fmla v21.4s, v30.4s, v5.4s\n" - "fmla v20.4s, v30.4s, v6.4s\n" - "ldr x7, [%[inptrs], 56]\n" - "fmla v15.4s, v19.4s, v4.4s\n" - "ldr x17, [%[inptrs], 16]\n" - "str q16, [x26, x28]\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v21.4s, v19.4s, v7.4s\n" - "ldr q16, [x7, x27]\n" - "fmla v20.4s, v19.4s, v8.4s\n" - "ldr q6, [%[wbptr], #112]\n" - "str q15, [x25, x28]\n" - "fmla v18.4s, v31.4s, v4.4s\n" - "fmla v24.4s, v31.4s, v7.4s\n" - "ldr q15, [x17, x27]\n" - "fmla v21.4s, v17.4s, v4.4s\n" - "ldr x25, [%[outptrs], 88]\n" - "fmla v20.4s, v31.4s, v10.4s\n" - "ldr q8, [%[wbptr], #80]\n" - "str q18, [x24, x28]\n" - "mov v18.16b, v13.16b\n" - "fmla v24.4s, v14.4s, v4.4s\n" - "ldr x26, [%[outptrs], 112]\n" - "mov v22.16b, v13.16b\n" - "ldr x20, [%[inptrs], 
144]\n" - "str q21, [x26, x28]\n" - "fmla v20.4s, v17.4s, v5.4s\n" - "mov v23.16b, v13.16b\n" - "ldr q10, [%[wbptr], #48]\n" - "str q24, [x25, x28]\n" - "mov v19.16b, v13.16b\n" - "mov v17.16b, v13.16b\n" - "ldr q21, [x20, x27]\n" - "fmla v20.4s, v14.4s, v7.4s\n" - "ldr q5, [%[wbptr], #128]\n" - "mov v14.16b, v13.16b\n" - "ldr x26, [%[outptrs], 120]\n" - "mov v0.16b, v13.16b\n" - "ldr x19, [%[inptrs], 104]\n" - "mov v1.16b, v13.16b\n" - "mov v2.16b, v13.16b\n" - "fmla v20.4s, v26.4s, v4.4s\n" - "ldr q7, [%[wbptr], #96]\n" - "fmla v18.4s, v29.4s, v12.4s\n" - "ldr q29, [x19, x27]\n" - "str q20, [x26, x28]\n" - "ldr q4, [%[wbptr], #144]\n" - "add x28, x28, #16\n" - "bne 2b\n" - "3:\n" - "mov v3.16b, v13.16b\n" - "ldr x7, [%[inptrs], 64]\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "ldr x17, [%[inptrs], 24]\n" - "fmla v22.4s, v27.4s, v12.4s\n" - "ldr q30, [x7, x27]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr x21, [%[inptrs], 192]\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "ldr x20, [%[inptrs], 152]\n" - "fmla v18.4s, v28.4s, v11.4s\n" - "ldr q24, [x17, x27]\n" - "fmla v22.4s, v25.4s, v9.4s\n" - "ldr x19, [%[inptrs], 112]\n" - "fmla v23.4s, v16.4s, v9.4s\n" - "ldr x7, [%[inptrs], 72]\n" - "fmla v17.4s, v16.4s, v12.4s\n" - "ldr x17, [%[inptrs], 32]\n" - "fmla v18.4s, v25.4s, v6.4s\n" - "ldr q31, [x21, x27]\n" - "fmla v22.4s, v16.4s, v11.4s\n" - "ldr x22, [%[inptrs], 240]\n" - "fmla v23.4s, v15.4s, v11.4s\n" - "ldr x21, [%[inptrs], 200]\n" - "fmla v14.4s, v15.4s, v12.4s\n" - "ldr x23, [%[outptrs], 0]\n" - "fmla v18.4s, v16.4s, v8.4s\n" - "ldr q25, [x20, x27]\n" - "fmla v22.4s, v21.4s, v6.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v19.4s, v21.4s, v9.4s\n" - "ldr x24, [%[outptrs], 32]\n" - "fmla v0.4s, v21.4s, v12.4s\n" - "ldr q21, [x19, x27]\n" - "fmla v18.4s, v15.4s, v10.4s\n" - "ldr q20, [x7, x27]\n" - "fmla v22.4s, v29.4s, v8.4s\n" - "ldr x19, [%[inptrs], 120]\n" - "fmla v23.4s, v29.4s, v6.4s\n" - "ldr x7, [%[inptrs], 80]\n" - "fmla v19.4s, v29.4s, v11.4s\n" - "ldr x25, 
[%[outptrs], 64]\n" - "fmla v18.4s, v29.4s, v5.4s\n" - "ldr x26, [%[outptrs], 96]\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "ldr q26, [x17, x27]\n" - "fmla v22.4s, v30.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "ldr x17, [%[inptrs], 40]\n" - "fmla v23.4s, v30.4s, v8.4s\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "fmla v14.4s, v30.4s, v9.4s\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "mov v16.16b, v13.16b\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "fmla v19.4s, v31.4s, v6.4s\n" - "fmla v0.4s, v31.4s, v9.4s\n" - "mov v15.16b, v13.16b\n" - "fmla v23.4s, v24.4s, v10.4s\n" - "fmla v14.4s, v24.4s, v11.4s\n" - "ldr q27, [x22, x27]\n" - "fmla v22.4s, v25.4s, v5.4s\n" - "ldr x22, [%[inptrs], 248]\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "fmla v1.4s, v25.4s, v9.4s\n" - "fmla v16.4s, v25.4s, v12.4s\n" - "ldr q30, [x21, x27]\n" - "fmla v18.4s, v21.4s, v4.4s\n" - "ldr x21, [%[inptrs], 208]\n" - "fmla v22.4s, v21.4s, v7.4s\n" - "fmla v23.4s, v21.4s, v5.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v17.4s, v21.4s, v8.4s\n" - "fmla v14.4s, v21.4s, v6.4s\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "str q18, [x23, x28]\n" - "mov v18.16b, v13.16b\n" - "fmla v2.4s, v21.4s, v9.4s\n" - "ldr x23, [%[outptrs], 8]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "ldr q24, [x20, x27]\n" - "fmla v23.4s, v20.4s, v7.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v17.4s, v20.4s, v10.4s\n" - "fmla v14.4s, v20.4s, v8.4s\n" - "fmla v2.4s, v20.4s, v11.4s\n" - "fmla v3.4s, v20.4s, v9.4s\n" - "fmla v18.4s, v20.4s, v12.4s\n" - "ldr q25, [x19, x27]\n" - "fmla v0.4s, v27.4s, v6.4s\n" - "ldr q29, [x7, x27]\n" - "fmla v14.4s, v26.4s, v10.4s\n" - "ldr x19, [%[inptrs], 128]\n" - "fmla v3.4s, v26.4s, v11.4s\n" - "ldr q27, [x17, x27]\n" - "fmla v19.4s, v30.4s, v5.4s\n" - "ldr x7, [%[inptrs], 88]\n" - "fmla v0.4s, v30.4s, v8.4s\n" - "fmla v1.4s, v30.4s, v6.4s\n" - "fmla 
v16.4s, v30.4s, v9.4s\n" - "ldr q28, [x22, x27]\n" - "fmla v22.4s, v24.4s, v4.4s\n" - "ldr x22, [%[inptrs], 256]\n" - "fmla v19.4s, v24.4s, v7.4s\n" - "fmla v17.4s, v24.4s, v5.4s\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "fmla v1.4s, v24.4s, v8.4s\n" - "fmla v2.4s, v24.4s, v6.4s\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "str q22, [x24, x28]\n" - "mov v21.16b, v13.16b\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "ldr x24, [%[outptrs], 40]\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "fmla v17.4s, v25.4s, v7.4s\n" - "fmla v21.4s, v24.4s, v12.4s\n" - "ldr q22, [x21, x27]\n" - "fmla v14.4s, v25.4s, v5.4s\n" - "ldr x21, [%[inptrs], 216]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "fmla v2.4s, v25.4s, v8.4s\n" - "str q23, [x23, x28]\n" - "mov v24.16b, v13.16b\n" - "mov v20.16b, v13.16b\n" - "ldr x23, [%[outptrs], 16]\n" - "fmla v3.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "fmla v24.4s, v25.4s, v12.4s\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "ldr q30, [x20, x27]\n" - "fmla v2.4s, v29.4s, v10.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmla v3.4s, v29.4s, v8.4s\n" - "fmla v0.4s, v28.4s, v5.4s\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "ldr q31, [x19, x27]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "ldr q26, [x7, x27]\n" - "fmla v19.4s, v22.4s, v4.4s\n" - "ldr x19, [%[inptrs], 136]\n" - "fmla v3.4s, v27.4s, v10.4s\n" - "ldr q23, [x22, x27]\n" - "fmla v0.4s, v22.4s, v7.4s\n" - "ldr x22, [%[inptrs], 264]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "fmla v16.4s, v22.4s, v8.4s\n" - "str q19, [x25, x28]\n" - "fmla v15.4s, v22.4s, v6.4s\n" - "fmla v21.4s, v22.4s, v9.4s\n" - "ldr q27, [x21, x27]\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "ldr q28, [x20, x27]\n" - "fmla v1.4s, v30.4s, v7.4s\n" - "ldr x21, [%[inptrs], 224]\n" - "fmla v2.4s, v30.4s, v5.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v16.4s, v30.4s, v10.4s\n" - "ldr x25, [%[outptrs], 72]\n" - "str q17, [x24, x28]\n" - "fmla v15.4s, v30.4s, v8.4s\n" - "fmla v18.4s, v30.4s, v6.4s\n" - "ldr q22, [x19, x27]\n" - "fmla v21.4s, 
v30.4s, v11.4s\n" - "ldr x24, [%[outptrs], 48]\n" - "fmla v24.4s, v30.4s, v9.4s\n" - "fmla v20.4s, v30.4s, v12.4s\n" - "fmla v14.4s, v31.4s, v4.4s\n" - "ldr q30, [x22, x27]\n" - "fmla v2.4s, v31.4s, v7.4s\n" - "ldr q19, [x21, x27]\n" - "fmla v3.4s, v31.4s, v5.4s\n" - "ldr x22, [%[inptrs], 272]\n" - "fmla v15.4s, v31.4s, v10.4s\n" - "ldr x21, [%[inptrs], 232]\n" - "str q14, [x23, x28]\n" - "fmla v18.4s, v31.4s, v8.4s\n" - "fmla v24.4s, v31.4s, v11.4s\n" - "ldr q31, [x20, x27]\n" - "fmla v3.4s, v26.4s, v7.4s\n" - "ldr q17, [x22, x27]\n" - "fmla v0.4s, v23.4s, v4.4s\n" - "ldr x22, [%[inptrs], 280]\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "ldr q14, [x21, x27]\n" - "fmla v16.4s, v23.4s, v5.4s\n" - "ldr x23, [%[outptrs], 24]\n" - "fmla v21.4s, v23.4s, v6.4s\n" - "ldr q26, [x22, x27]\n" - "str q0, [x26, x28]\n" - "fmla v1.4s, v27.4s, v4.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "ldr x26, [%[outptrs], 104]\n" - "fmla v16.4s, v27.4s, v7.4s\n" - "add x27, x27, #16\n" - "fmla v21.4s, v27.4s, v8.4s\n" - "fmla v24.4s, v27.4s, v6.4s\n" - "str q1, [x25, x28]\n" - "fmla v20.4s, v27.4s, v9.4s\n" - "fmla v2.4s, v28.4s, v4.4s\n" - "ldr x25, [%[outptrs], 80]\n" - "fmla v15.4s, v28.4s, v7.4s\n" - "fmla v18.4s, v28.4s, v5.4s\n" - "fmla v21.4s, v28.4s, v10.4s\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "fmla v20.4s, v28.4s, v11.4s\n" - "fmla v3.4s, v22.4s, v4.4s\n" - "str q2, [x24, x28]\n" - "fmla v16.4s, v30.4s, v4.4s\n" - "fmla v18.4s, v22.4s, v7.4s\n" - "ldr x24, [%[outptrs], 56]\n" - "fmla v24.4s, v22.4s, v10.4s\n" - "fmla v21.4s, v30.4s, v5.4s\n" - "str q3, [x23, x28]\n" - "fmla v20.4s, v30.4s, v6.4s\n" - "str q16, [x26, x28]\n" - "fmla v15.4s, v19.4s, v4.4s\n" - "fmla v18.4s, v31.4s, v4.4s\n" - "ldr x26, [%[outptrs], 112]\n" - "fmla v21.4s, v19.4s, v7.4s\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v20.4s, v19.4s, v8.4s\n" - "str q15, [x25, x28]\n" - "str q18, [x24, x28]\n" - "ldr x25, [%[outptrs], 88]\n" - "fmla v24.4s, v31.4s, v7.4s\n" - "fmla v21.4s, v17.4s, v4.4s\n" - "fmla v20.4s, 
v31.4s, v10.4s\n" - "str q21, [x26, x28]\n" - "fmla v20.4s, v17.4s, v5.4s\n" - "ldr x26, [%[outptrs], 120]\n" - "fmla v24.4s, v14.4s, v4.4s\n" - "fmla v20.4s, v14.4s, v7.4s\n" - "str q24, [x25, x28]\n" - "fmla v20.4s, v26.4s, v4.4s\n" - "str q20, [x26, x28]\n" - "add x28, x28, #16\n" - "4:\n" - "cbz x15, 7f\n" - "ldr s13, [%[wbptr]]\n" - "mov v18.16b, v13.16b\n" - "ldr s12, [%[wbptr], #4]\n" - "mov v22.16b, v13.16b\n" - "ldr s11, [%[wbptr], #8]\n" - "mov v23.16b, v13.16b\n" - "ldr s10, [%[wbptr], #12]\n" - "mov v19.16b, v13.16b\n" - "ldr s9, [%[wbptr], #16]\n" - "mov v17.16b, v13.16b\n" - "ldr s8, [%[wbptr], #20]\n" - "mov v14.16b, v13.16b\n" - "ldr s7, [%[wbptr], #24]\n" - "mov v0.16b, v13.16b\n" - "ldr s6, [%[wbptr], #28]\n" - "mov v1.16b, v13.16b\n" - "ldr s5, [%[wbptr], #32]\n" - "mov v2.16b, v13.16b\n" - "ldr s4, [%[wbptr], #36]\n" - "ldr x17, [%[inptrs], 0]\n" - "ldr x7, [%[inptrs], 48]\n" - "ldr x19, [%[inptrs], 96]\n" - "ldr x20, [%[inptrs], 144]\n" - "subs x15, x15, #1\n" - "ldr s29, [x17, x27]\n" - "fmla v18.4s, v29.4s, v12.4s\n" - "ldr s27, [x7, x27]\n" - "ldr s25, [x19, x27]\n" - "ldr x17, [%[inptrs], 8]\n" - "ldr s21, [x20, x27]\n" - "ldr x7, [%[inptrs], 56]\n" - "ldr s28, [x17, x27]\n" - "ldr x19, [%[inptrs], 104]\n" - "ldr s16, [x7, x27]\n" - "ldr x17, [%[inptrs], 16]\n" - "ldr s29, [x19, x27]\n" - "ldr s15, [x17, x27]\n" - "beq 6f\n" - "5:\n" - "mov v3.16b, v13.16b\n" - "ldr x7, [%[inptrs], 64]\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "ldr x17, [%[inptrs], 24]\n" - "fmla v22.4s, v27.4s, v12.4s\n" - "ldr s30, [x7, x27]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr x21, [%[inptrs], 192]\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "ldr x20, [%[inptrs], 152]\n" - "fmla v18.4s, v28.4s, v11.4s\n" - "ldr s24, [x17, x27]\n" - "fmla v22.4s, v25.4s, v9.4s\n" - "ldr x19, [%[inptrs], 112]\n" - "fmla v23.4s, v16.4s, v9.4s\n" - "ldr x7, [%[inptrs], 72]\n" - "fmla v17.4s, v16.4s, v12.4s\n" - "ldr x17, [%[inptrs], 32]\n" - "fmla v18.4s, v25.4s, v6.4s\n" - "ldr s31, [x21, 
x27]\n" - "fmla v22.4s, v16.4s, v11.4s\n" - "ldr x22, [%[inptrs], 240]\n" - "fmla v23.4s, v15.4s, v11.4s\n" - "ldr x21, [%[inptrs], 200]\n" - "fmla v14.4s, v15.4s, v12.4s\n" - "ldr x23, [%[outptrs], 0]\n" - "fmla v18.4s, v16.4s, v8.4s\n" - "ldr s25, [x20, x27]\n" - "fmla v22.4s, v21.4s, v6.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v19.4s, v21.4s, v9.4s\n" - "ldr x24, [%[outptrs], 32]\n" - "fmla v0.4s, v21.4s, v12.4s\n" - "ldr s21, [x19, x27]\n" - "fmla v18.4s, v15.4s, v10.4s\n" - "ldr s20, [x7, x27]\n" - "fmla v22.4s, v29.4s, v8.4s\n" - "ldr x19, [%[inptrs], 120]\n" - "fmla v23.4s, v29.4s, v6.4s\n" - "ldr x7, [%[inptrs], 80]\n" - "fmla v19.4s, v29.4s, v11.4s\n" - "ldr x25, [%[outptrs], 64]\n" - "fmla v18.4s, v29.4s, v5.4s\n" - "ldr x26, [%[outptrs], 96]\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "ldr s26, [x17, x27]\n" - "fmla v22.4s, v30.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "ldr x17, [%[inptrs], 40]\n" - "fmla v23.4s, v30.4s, v8.4s\n" - "subs x15, x15, #1\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "fmla v14.4s, v30.4s, v9.4s\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "ldr s27, [x22, x27]\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "ldr x22, [%[inptrs], 248]\n" - "fmla v23.4s, v24.4s, v10.4s\n" - "fmla v19.4s, v31.4s, v6.4s\n" - "fmla v14.4s, v24.4s, v11.4s\n" - "ldr s30, [x21, x27]\n" - "fmla v0.4s, v31.4s, v9.4s\n" - "ldr s24, [x20, x27]\n" - "fmla v22.4s, v25.4s, v5.4s\n" - "ldr x21, [%[inptrs], 208]\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "fmla v1.4s, v25.4s, v9.4s\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v21.4s, v4.4s\n" - "fmla v22.4s, v21.4s, v7.4s\n" - "fmla v23.4s, v21.4s, v5.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v14.4s, v21.4s, v6.4s\n" - "fmla v17.4s, v21.4s, v8.4s\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "str s18, [x23, x28]\n" - "mov v16.16b, v13.16b\n" - "fmla v2.4s, 
v21.4s, v9.4s\n" - "ldr x23, [%[outptrs], 8]\n" - "fmla v23.4s, v20.4s, v7.4s\n" - "fmla v14.4s, v20.4s, v8.4s\n" - "fmla v16.4s, v25.4s, v12.4s\n" - "ldr s25, [x19, x27]\n" - "fmla v17.4s, v20.4s, v10.4s\n" - "ldr x19, [%[inptrs], 128]\n" - "fmla v2.4s, v20.4s, v11.4s\n" - "fmla v3.4s, v20.4s, v9.4s\n" - "fmla v14.4s, v26.4s, v10.4s\n" - "fmla v0.4s, v27.4s, v6.4s\n" - "mov v15.16b, v13.16b\n" - "fmla v19.4s, v30.4s, v5.4s\n" - "fmla v1.4s, v30.4s, v6.4s\n" - "fmla v16.4s, v30.4s, v9.4s\n" - "fmla v3.4s, v26.4s, v11.4s\n" - "ldr s29, [x7, x27]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "ldr s27, [x17, x27]\n" - "fmla v0.4s, v30.4s, v8.4s\n" - "ldr s28, [x22, x27]\n" - "fmla v22.4s, v24.4s, v4.4s\n" - "ldr x7, [%[inptrs], 88]\n" - "fmla v19.4s, v24.4s, v7.4s\n" - "ldr x22, [%[inptrs], 256]\n" - "fmla v17.4s, v24.4s, v5.4s\n" - "ldr x17, [%[inptrs], 0]\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "fmla v1.4s, v24.4s, v8.4s\n" - "str s22, [x24, x28]\n" - "mov v18.16b, v13.16b\n" - "fmla v2.4s, v24.4s, v6.4s\n" - "ldr x24, [%[outptrs], 40]\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "fmla v18.4s, v20.4s, v12.4s\n" - "ldr s22, [x21, x27]\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "ldr x21, [%[inptrs], 216]\n" - "fmla v17.4s, v25.4s, v7.4s\n" - "fmla v14.4s, v25.4s, v5.4s\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "fmla v2.4s, v25.4s, v8.4s\n" - "fmla v3.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "str s23, [x23, x28]\n" - "mov v21.16b, v13.16b\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "ldr x23, [%[outptrs], 16]\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "fmla v2.4s, v29.4s, v10.4s\n" - "fmla v21.4s, v24.4s, v12.4s\n" - "ldr s30, [x20, x27]\n" - "fmla v3.4s, v29.4s, v8.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "ldr s31, [x19, x27]\n" - "fmla v0.4s, v28.4s, v5.4s\n" - "ldr x19, [%[inptrs], 136]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "ldr s26, [x7, x27]\n" - "fmla v3.4s, v27.4s, v10.4s\n" - "ldr s23, [x22, x27]\n" - "fmla v19.4s, 
v22.4s, v4.4s\n" - "ldr x22, [%[inptrs], 264]\n" - "fmla v0.4s, v22.4s, v7.4s\n" - "ldr x7, [%[inptrs], 48]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "fmla v16.4s, v22.4s, v8.4s\n" - "fmla v15.4s, v22.4s, v6.4s\n" - "fmla v21.4s, v22.4s, v9.4s\n" - "str s19, [x25, x28]\n" - "mov v24.16b, v13.16b\n" - "mov v20.16b, v13.16b\n" - "ldr s27, [x21, x27]\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "ldr x21, [%[inptrs], 224]\n" - "fmla v24.4s, v25.4s, v12.4s\n" - "ldr s28, [x20, x27]\n" - "fmla v1.4s, v30.4s, v7.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v2.4s, v30.4s, v5.4s\n" - "ldr x25, [%[outptrs], 72]\n" - "str s17, [x24, x28]\n" - "fmla v16.4s, v30.4s, v10.4s\n" - "fmla v15.4s, v30.4s, v8.4s\n" - "ldr s22, [x19, x27]\n" - "fmla v18.4s, v30.4s, v6.4s\n" - "ldr x24, [%[outptrs], 48]\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "ldr x19, [%[inptrs], 96]\n" - "fmla v24.4s, v30.4s, v9.4s\n" - "fmla v20.4s, v30.4s, v12.4s\n" - "fmla v14.4s, v31.4s, v4.4s\n" - "ldr s30, [x22, x27]\n" - "fmla v2.4s, v31.4s, v7.4s\n" - "ldr s19, [x21, x27]\n" - "fmla v3.4s, v31.4s, v5.4s\n" - "ldr x22, [%[inptrs], 272]\n" - "fmla v15.4s, v31.4s, v10.4s\n" - "ldr x21, [%[inptrs], 232]\n" - "str s14, [x23, x28]\n" - "fmla v18.4s, v31.4s, v8.4s\n" - "fmla v24.4s, v31.4s, v11.4s\n" - "ldr s31, [x20, x27]\n" - "fmla v3.4s, v26.4s, v7.4s\n" - "ldr s17, [x22, x27]\n" - "fmla v0.4s, v23.4s, v4.4s\n" - "ldr x22, [%[inptrs], 280]\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "ldr s14, [x21, x27]\n" - "fmla v16.4s, v23.4s, v5.4s\n" - "ldr x23, [%[outptrs], 24]\n" - "fmla v21.4s, v23.4s, v6.4s\n" - "ldr s26, [x22, x27]\n" - "str s0, [x26, x28]\n" - "fmla v1.4s, v27.4s, v4.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "ldr s13, [%[wbptr]]\n" - "fmla v16.4s, v27.4s, v7.4s\n" - "ldr x26, [%[outptrs], 104]\n" - "fmla v21.4s, v27.4s, v8.4s\n" - "add x27, x27, #4\n" - "str s1, [x25, x28]\n" - "fmla v24.4s, v27.4s, v6.4s\n" - "fmla v20.4s, v27.4s, v9.4s\n" - "ldr s12, [%[wbptr], #4]\n" - "fmla v2.4s, v28.4s, v4.4s\n" - "ldr s29, [x17, 
x27]\n" - "fmla v15.4s, v28.4s, v7.4s\n" - "ldr s27, [x7, x27]\n" - "fmla v18.4s, v28.4s, v5.4s\n" - "ldr x25, [%[outptrs], 80]\n" - "fmla v21.4s, v28.4s, v10.4s\n" - "ldr x17, [%[inptrs], 8]\n" - "str s2, [x24, x28]\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "fmla v20.4s, v28.4s, v11.4s\n" - "ldr s9, [%[wbptr], #16]\n" - "fmla v3.4s, v22.4s, v4.4s\n" - "ldr s28, [x17, x27]\n" - "fmla v18.4s, v22.4s, v7.4s\n" - "ldr s25, [x19, x27]\n" - "fmla v24.4s, v22.4s, v10.4s\n" - "ldr x24, [%[outptrs], 56]\n" - "fmla v16.4s, v30.4s, v4.4s\n" - "ldr s11, [%[wbptr], #8]\n" - "str s3, [x23, x28]\n" - "fmla v21.4s, v30.4s, v5.4s\n" - "fmla v20.4s, v30.4s, v6.4s\n" - "ldr x7, [%[inptrs], 56]\n" - "fmla v15.4s, v19.4s, v4.4s\n" - "ldr x17, [%[inptrs], 16]\n" - "str s16, [x26, x28]\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v21.4s, v19.4s, v7.4s\n" - "ldr s16, [x7, x27]\n" - "fmla v20.4s, v19.4s, v8.4s\n" - "ldr s6, [%[wbptr], #28]\n" - "str s15, [x25, x28]\n" - "fmla v18.4s, v31.4s, v4.4s\n" - "fmla v24.4s, v31.4s, v7.4s\n" - "ldr s15, [x17, x27]\n" - "fmla v21.4s, v17.4s, v4.4s\n" - "ldr x25, [%[outptrs], 88]\n" - "fmla v20.4s, v31.4s, v10.4s\n" - "ldr s8, [%[wbptr], #20]\n" - "str s18, [x24, x28]\n" - "mov v18.16b, v13.16b\n" - "fmla v24.4s, v14.4s, v4.4s\n" - "ldr x26, [%[outptrs], 112]\n" - "mov v22.16b, v13.16b\n" - "ldr x20, [%[inptrs], 144]\n" - "str s21, [x26, x28]\n" - "fmla v20.4s, v17.4s, v5.4s\n" - "mov v23.16b, v13.16b\n" - "ldr s10, [%[wbptr], #12]\n" - "str s24, [x25, x28]\n" - "mov v19.16b, v13.16b\n" - "mov v17.16b, v13.16b\n" - "ldr s21, [x20, x27]\n" - "fmla v20.4s, v14.4s, v7.4s\n" - "ldr s5, [%[wbptr], #32]\n" - "mov v14.16b, v13.16b\n" - "ldr x26, [%[outptrs], 120]\n" - "mov v0.16b, v13.16b\n" - "ldr x19, [%[inptrs], 104]\n" - "mov v1.16b, v13.16b\n" - "mov v2.16b, v13.16b\n" - "fmla v20.4s, v26.4s, v4.4s\n" - "ldr s7, [%[wbptr], #24]\n" - "fmla v18.4s, v29.4s, v12.4s\n" - "ldr s29, [x19, x27]\n" - "str s20, [x26, x28]\n" - "ldr s4, [%[wbptr], #36]\n" - "add x28, 
x28, #4\n" - "bne 5b\n" - "6:\n" - "mov v3.16b, v13.16b\n" - "ldr x7, [%[inptrs], 64]\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "ldr x17, [%[inptrs], 24]\n" - "fmla v22.4s, v27.4s, v12.4s\n" - "ldr s30, [x7, x27]\n" - "fmla v23.4s, v28.4s, v12.4s\n" - "ldr x21, [%[inptrs], 192]\n" - "fmla v19.4s, v25.4s, v12.4s\n" - "ldr x20, [%[inptrs], 152]\n" - "fmla v18.4s, v28.4s, v11.4s\n" - "ldr s24, [x17, x27]\n" - "fmla v22.4s, v25.4s, v9.4s\n" - "ldr x19, [%[inptrs], 112]\n" - "fmla v23.4s, v16.4s, v9.4s\n" - "ldr x7, [%[inptrs], 72]\n" - "fmla v17.4s, v16.4s, v12.4s\n" - "ldr x17, [%[inptrs], 32]\n" - "fmla v18.4s, v25.4s, v6.4s\n" - "ldr s31, [x21, x27]\n" - "fmla v22.4s, v16.4s, v11.4s\n" - "ldr x22, [%[inptrs], 240]\n" - "fmla v23.4s, v15.4s, v11.4s\n" - "ldr x21, [%[inptrs], 200]\n" - "fmla v14.4s, v15.4s, v12.4s\n" - "ldr x23, [%[outptrs], 0]\n" - "fmla v18.4s, v16.4s, v8.4s\n" - "ldr s25, [x20, x27]\n" - "fmla v22.4s, v21.4s, v6.4s\n" - "ldr x20, [%[inptrs], 160]\n" - "fmla v19.4s, v21.4s, v9.4s\n" - "ldr x24, [%[outptrs], 32]\n" - "fmla v0.4s, v21.4s, v12.4s\n" - "ldr s21, [x19, x27]\n" - "fmla v18.4s, v15.4s, v10.4s\n" - "ldr s20, [x7, x27]\n" - "fmla v22.4s, v29.4s, v8.4s\n" - "ldr x19, [%[inptrs], 120]\n" - "fmla v23.4s, v29.4s, v6.4s\n" - "ldr x7, [%[inptrs], 80]\n" - "fmla v19.4s, v29.4s, v11.4s\n" - "ldr x25, [%[outptrs], 64]\n" - "fmla v18.4s, v29.4s, v5.4s\n" - "ldr x26, [%[outptrs], 96]\n" - "fmla v17.4s, v29.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "ldr s26, [x17, x27]\n" - "fmla v22.4s, v30.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v18.4s, v30.4s, v7.4s\n" - "ldr x17, [%[inptrs], 40]\n" - "fmla v23.4s, v30.4s, v8.4s\n" - "fmla v17.4s, v30.4s, v11.4s\n" - "fmla v14.4s, v30.4s, v9.4s\n" - "fmla v2.4s, v30.4s, v12.4s\n" - "mov v16.16b, v13.16b\n" - "fmla v3.4s, v24.4s, v12.4s\n" - "fmla v19.4s, v31.4s, v6.4s\n" - "fmla v0.4s, v31.4s, v9.4s\n" - "mov v15.16b, v13.16b\n" - "fmla v23.4s, v24.4s, v10.4s\n" 
- "fmla v14.4s, v24.4s, v11.4s\n" - "ldr s27, [x22, x27]\n" - "fmla v22.4s, v25.4s, v5.4s\n" - "ldr x22, [%[inptrs], 248]\n" - "fmla v19.4s, v25.4s, v8.4s\n" - "fmla v17.4s, v25.4s, v6.4s\n" - "fmla v0.4s, v25.4s, v11.4s\n" - "fmla v1.4s, v25.4s, v9.4s\n" - "fmla v16.4s, v25.4s, v12.4s\n" - "ldr s30, [x21, x27]\n" - "fmla v18.4s, v21.4s, v4.4s\n" - "ldr x21, [%[inptrs], 208]\n" - "fmla v22.4s, v21.4s, v7.4s\n" - "fmla v23.4s, v21.4s, v5.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v17.4s, v21.4s, v8.4s\n" - "fmla v14.4s, v21.4s, v6.4s\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "str s18, [x23, x28]\n" - "mov v18.16b, v13.16b\n" - "fmla v2.4s, v21.4s, v9.4s\n" - "ldr x23, [%[outptrs], 8]\n" - "fmla v15.4s, v21.4s, v12.4s\n" - "ldr s24, [x20, x27]\n" - "fmla v23.4s, v20.4s, v7.4s\n" - "ldr x20, [%[inptrs], 168]\n" - "fmla v17.4s, v20.4s, v10.4s\n" - "fmla v14.4s, v20.4s, v8.4s\n" - "fmla v2.4s, v20.4s, v11.4s\n" - "fmla v3.4s, v20.4s, v9.4s\n" - "fmla v18.4s, v20.4s, v12.4s\n" - "ldr s25, [x19, x27]\n" - "fmla v0.4s, v27.4s, v6.4s\n" - "ldr s29, [x7, x27]\n" - "fmla v14.4s, v26.4s, v10.4s\n" - "ldr x19, [%[inptrs], 128]\n" - "fmla v3.4s, v26.4s, v11.4s\n" - "ldr s27, [x17, x27]\n" - "fmla v19.4s, v30.4s, v5.4s\n" - "ldr x7, [%[inptrs], 88]\n" - "fmla v0.4s, v30.4s, v8.4s\n" - "fmla v1.4s, v30.4s, v6.4s\n" - "fmla v16.4s, v30.4s, v9.4s\n" - "ldr s28, [x22, x27]\n" - "fmla v22.4s, v24.4s, v4.4s\n" - "ldr x22, [%[inptrs], 256]\n" - "fmla v19.4s, v24.4s, v7.4s\n" - "fmla v17.4s, v24.4s, v5.4s\n" - "fmla v0.4s, v24.4s, v10.4s\n" - "fmla v1.4s, v24.4s, v8.4s\n" - "fmla v2.4s, v24.4s, v6.4s\n" - "fmla v16.4s, v24.4s, v11.4s\n" - "str s22, [x24, x28]\n" - "mov v21.16b, v13.16b\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "ldr x24, [%[outptrs], 40]\n" - "fmla v23.4s, v25.4s, v4.4s\n" - "fmla v17.4s, v25.4s, v7.4s\n" - "fmla v21.4s, v24.4s, v12.4s\n" - "ldr s22, [x21, x27]\n" - "fmla v14.4s, v25.4s, v5.4s\n" - "ldr x21, [%[inptrs], 216]\n" - "fmla v1.4s, v25.4s, v10.4s\n" - "fmla v2.4s, 
v25.4s, v8.4s\n" - "str s23, [x23, x28]\n" - "mov v24.16b, v13.16b\n" - "mov v20.16b, v13.16b\n" - "ldr x23, [%[outptrs], 16]\n" - "fmla v3.4s, v25.4s, v6.4s\n" - "fmla v15.4s, v25.4s, v11.4s\n" - "fmla v18.4s, v25.4s, v9.4s\n" - "fmla v24.4s, v25.4s, v12.4s\n" - "fmla v14.4s, v29.4s, v7.4s\n" - "ldr s30, [x20, x27]\n" - "fmla v2.4s, v29.4s, v10.4s\n" - "ldr x20, [%[inptrs], 176]\n" - "fmla v3.4s, v29.4s, v8.4s\n" - "fmla v0.4s, v28.4s, v5.4s\n" - "fmla v18.4s, v29.4s, v11.4s\n" - "ldr s31, [x19, x27]\n" - "fmla v16.4s, v28.4s, v6.4s\n" - "ldr s26, [x7, x27]\n" - "fmla v19.4s, v22.4s, v4.4s\n" - "ldr x19, [%[inptrs], 136]\n" - "fmla v3.4s, v27.4s, v10.4s\n" - "ldr s23, [x22, x27]\n" - "fmla v0.4s, v22.4s, v7.4s\n" - "ldr x22, [%[inptrs], 264]\n" - "fmla v1.4s, v22.4s, v5.4s\n" - "fmla v16.4s, v22.4s, v8.4s\n" - "str s19, [x25, x28]\n" - "fmla v15.4s, v22.4s, v6.4s\n" - "fmla v21.4s, v22.4s, v9.4s\n" - "ldr s27, [x21, x27]\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "ldr s28, [x20, x27]\n" - "fmla v1.4s, v30.4s, v7.4s\n" - "ldr x21, [%[inptrs], 224]\n" - "fmla v2.4s, v30.4s, v5.4s\n" - "ldr x20, [%[inptrs], 184]\n" - "fmla v16.4s, v30.4s, v10.4s\n" - "ldr x25, [%[outptrs], 72]\n" - "str s17, [x24, x28]\n" - "fmla v15.4s, v30.4s, v8.4s\n" - "fmla v18.4s, v30.4s, v6.4s\n" - "ldr s22, [x19, x27]\n" - "fmla v21.4s, v30.4s, v11.4s\n" - "ldr x24, [%[outptrs], 48]\n" - "fmla v24.4s, v30.4s, v9.4s\n" - "fmla v20.4s, v30.4s, v12.4s\n" - "fmla v14.4s, v31.4s, v4.4s\n" - "ldr s30, [x22, x27]\n" - "fmla v2.4s, v31.4s, v7.4s\n" - "ldr s19, [x21, x27]\n" - "fmla v3.4s, v31.4s, v5.4s\n" - "ldr x22, [%[inptrs], 272]\n" - "fmla v15.4s, v31.4s, v10.4s\n" - "ldr x21, [%[inptrs], 232]\n" - "str s14, [x23, x28]\n" - "fmla v18.4s, v31.4s, v8.4s\n" - "fmla v24.4s, v31.4s, v11.4s\n" - "ldr s31, [x20, x27]\n" - "fmla v3.4s, v26.4s, v7.4s\n" - "ldr s17, [x22, x27]\n" - "fmla v0.4s, v23.4s, v4.4s\n" - "ldr x22, [%[inptrs], 280]\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "ldr s14, [x21, x27]\n" - "fmla 
v16.4s, v23.4s, v5.4s\n" - "ldr x23, [%[outptrs], 24]\n" - "fmla v21.4s, v23.4s, v6.4s\n" - "ldr s26, [x22, x27]\n" - "str s0, [x26, x28]\n" - "fmla v1.4s, v27.4s, v4.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "ldr x26, [%[outptrs], 104]\n" - "fmla v16.4s, v27.4s, v7.4s\n" - "add x27, x27, #4\n" - "fmla v21.4s, v27.4s, v8.4s\n" - "fmla v24.4s, v27.4s, v6.4s\n" - "str s1, [x25, x28]\n" - "fmla v20.4s, v27.4s, v9.4s\n" - "fmla v2.4s, v28.4s, v4.4s\n" - "ldr x25, [%[outptrs], 80]\n" - "fmla v15.4s, v28.4s, v7.4s\n" - "fmla v18.4s, v28.4s, v5.4s\n" - "fmla v21.4s, v28.4s, v10.4s\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "fmla v20.4s, v28.4s, v11.4s\n" - "fmla v3.4s, v22.4s, v4.4s\n" - "str s2, [x24, x28]\n" - "fmla v16.4s, v30.4s, v4.4s\n" - "fmla v18.4s, v22.4s, v7.4s\n" - "ldr x24, [%[outptrs], 56]\n" - "fmla v24.4s, v22.4s, v10.4s\n" - "fmla v21.4s, v30.4s, v5.4s\n" - "str s3, [x23, x28]\n" - "fmla v20.4s, v30.4s, v6.4s\n" - "str s16, [x26, x28]\n" - "fmla v15.4s, v19.4s, v4.4s\n" - "fmla v18.4s, v31.4s, v4.4s\n" - "ldr x26, [%[outptrs], 112]\n" - "fmla v21.4s, v19.4s, v7.4s\n" - "fmla v24.4s, v19.4s, v5.4s\n" - "fmla v20.4s, v19.4s, v8.4s\n" - "str s15, [x25, x28]\n" - "str s18, [x24, x28]\n" - "ldr x25, [%[outptrs], 88]\n" - "fmla v24.4s, v31.4s, v7.4s\n" - "fmla v21.4s, v17.4s, v4.4s\n" - "fmla v20.4s, v31.4s, v10.4s\n" - "str s21, [x26, x28]\n" - "fmla v20.4s, v17.4s, v5.4s\n" - "ldr x26, [%[outptrs], 120]\n" - "fmla v24.4s, v14.4s, v4.4s\n" - "fmla v20.4s, v14.4s, v7.4s\n" - "str s24, [x25, x28]\n" - "fmla v20.4s, v26.4s, v4.4s\n" - "str s20, [x26, x28]\n" - "add x28, x28, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr) - : [n_channels] "r" ((long) n_channels), [outptrs] "r" (outptrs), [inptrs] "r" (inptrs) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x15", "x16", "x17", "x7", "x19", 
"x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x9, %[inptr0], %[input_row_stride]\n" - "add x28, %[input_col_stride1], %[input_col_stride1]\n" - "add x16, %[outptr0], %[output_row_stride]\n" - "add x24, x9, %[input_row_stride]\n" - "add x25, x28, #64\n" - "add x23, x28, %[input_col_stride1]\n" - "add x26, x24, %[input_row_stride]\n" - "add x11, x23, #64\n" - "add x12, x23, %[input_col_stride1]\n" - "add x10, x26, %[input_row_stride]\n" - "add x13, x12, #64\n" - "add x14, x12, %[input_col_stride1]\n" - "add x27, x10, %[input_row_stride]\n" - "add x15, x14, #64\n" - "add x17, x16, %[output_row_stride]\n" - "add x7, x17, %[output_row_stride]\n" - "add x19, %[output_col_stride1], %[output_col_stride1]\n" - "and x21, %[n_channels], #3\n" - "add x20, x19, %[output_col_stride1]\n" - "lsr x22, %[n_channels], #2\n" - "cbz x22, 4f\n" - "1:\n" - "ldr q21, [%[wbptr]]\n" - "subs x22, x22, #1\n" - "mov v7.16b, v21.16b\n" - "ldr q20, [%[wbptr], #16]\n" - "mov v3.16b, v21.16b\n" - "ldr q14, [%[wbptr], #32]\n" - "mov v6.16b, v21.16b\n" - "ldr q13, [%[wbptr], #48]\n" - "mov v15.16b, v21.16b\n" - "ldr q17, [%[wbptr], #64]\n" - "mov v2.16b, v21.16b\n" - "ldr q12, [%[wbptr], #80]\n" - "mov v5.16b, v21.16b\n" - "ldr q11, [%[wbptr], #96]\n" - "mov v0.16b, v21.16b\n" - "ldr q10, [%[wbptr], #112]\n" - "mov v16.16b, v21.16b\n" - "ldr q9, [%[wbptr], #128]\n" - "mov v1.16b, v21.16b\n" - "ldr q8, [%[wbptr], #144]\n" - "mov v4.16b, v21.16b\n" - "ldr q22, [%[inptr0]]\n" - "fmla v7.4s, v22.4s, v20.4s\n" - "ldr q19, [x9]\n" - "fmla v3.4s, v19.4s, v20.4s\n" - "ldr q23, [%[inptr0], %[input_col_stride1]]\n" - "fmla 
v6.4s, v23.4s, v20.4s\n" - "ldr q18, [x24]\n" - "fmla v7.4s, v19.4s, v17.4s\n" - "ldr q27, [x9, %[input_col_stride1]]\n" - "fmla v3.4s, v18.4s, v17.4s\n" - "ldr q28, [%[inptr0], x28]\n" - "fmla v15.4s, v18.4s, v20.4s\n" - "ldr q25, [x26]\n" - "fmla v7.4s, v23.4s, v14.4s\n" - "ldr q22, [x24, %[input_col_stride1]]\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x9, #64]\n" - "prfm pldl1keep, [%[inptr0], x8]\n" - "fmla v7.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "prfm pldl1keep, [x9, x8]\n" - "prfm pldl1keep, [%[inptr0], x25]\n" - "prfm pldl1keep, [x26, #64]\n" - "prfm pldl1keep, [x24, x8]\n" - "fmla v7.4s, v27.4s, v12.4s\n" - "beq 3f\n" - "2:\n" - "mov v18.16b, v21.16b\n" - "ldr q23, [x9, x28]\n" - "mov v19.16b, v21.16b\n" - "prfm pldl1keep, [x9, x25]\n" - "fmla v6.4s, v27.4s, v17.4s\n" - "prfm pldl1keep, [%[inptr0], x11]\n" - "fmla v2.4s, v27.4s, v20.4s\n" - "ldr q24, [%[inptr0], x23]\n" - "fmla v7.4s, v28.4s, v13.4s\n" - "prfm pldl1keep, [x10, #64]\n" - "fmla v6.4s, v28.4s, v14.4s\n" - "prfm pldl1keep, [x26, x8]\n" - "fmla v5.4s, v28.4s, v20.4s\n" - "ldr q26, [x10]\n" - "fmla v3.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x25]\n" - "fmla v15.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x9, x11]\n" - "fmla v0.4s, v25.4s, v20.4s\n" - "ldr q25, [x26, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x13]\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "prfm pldl1keep, [x27, #64]\n" - "fmla v6.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x10, x8]\n" - "fmla v15.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x26, x25]\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "prfm pldl1keep, [x24, x11]\n" - "fmla v16.4s, v22.4s, v20.4s\n" - "ldr q22, [x24, x28]\n" - "fmla v7.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [x9, x13]\n" - "fmla v3.4s, v23.4s, v13.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v6.4s, v23.4s, v12.4s\n" - "prfm pldl1keep, [x27, x8]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "prfm 
pldl1keep, [x10, x25]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "prfm pldl1keep, [x26, x11]\n" - "fmla v1.4s, v23.4s, v20.4s\n" - "ldr q23, [x9, x23]\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x24, x13]\n" - "fmla v5.4s, v24.4s, v14.4s\n" - "prfm pldl1keep, [x9, x15]\n" - "fmla v4.4s, v24.4s, v20.4s\n" - "ldr q24, [%[inptr0], x12]\n" - "fmla v15.4s, v26.4s, v10.4s\n" - "prfm pldl1keep, [x27, x25]\n" - "fmla v0.4s, v26.4s, v17.4s\n" - "ldr q29, [x27]\n" - "fmla v3.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x10, x11]\n" - "fmla v15.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x26, x13]\n" - "fmla v2.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "prfm pldl1keep, [x27, x11]\n" - "fmla v16.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x10, x13]\n" - "fmla v18.4s, v25.4s, v20.4s\n" - "ldr q26, [x10, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x26, x15]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x27, x13]\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x15]\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x27, x15]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v5.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v22.4s, v14.4s\n" - "subs x22, x22, #1\n" - "fmla v1.4s, v22.4s, v17.4s\n" - "fmla v19.4s, v22.4s, v20.4s\n" - "mov v22.16b, v21.16b\n" - "fmla v6.4s, v23.4s, v11.4s\n" - "fmla v2.4s, v23.4s, v13.4s\n" - "fmla v5.4s, v23.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v14.4s\n" - "fmla v4.4s, v23.4s, v17.4s\n" - "fmla v22.4s, v23.4s, v20.4s\n" - "ldr q27, [x26, x28]\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "fmla v0.4s, v29.4s, v10.4s\n" - "mov v23.16b, v21.16b\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "mov v25.16b, v21.16b\n" - "mov v24.16b, v21.16b\n" - "fmla v15.4s, v26.4s, v9.4s\n" - "fmla v0.4s, v26.4s, v12.4s\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "fmla v18.4s, v26.4s, v17.4s\n" - "fmla v3.4s, v27.4s, v8.4s\n" 
- "ldr q29, [x24, x23]\n" - "fmla v15.4s, v27.4s, v11.4s\n" - "fmla v2.4s, v27.4s, v9.4s\n" - "fmla v0.4s, v27.4s, v13.4s\n" - "fmla v16.4s, v27.4s, v12.4s\n" - "fmla v1.4s, v27.4s, v10.4s\n" - "fmla v18.4s, v27.4s, v14.4s\n" - "fmla v19.4s, v27.4s, v17.4s\n" - "fmla v23.4s, v27.4s, v20.4s\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "ldr q28, [x9, x12]\n" - "fmla v2.4s, v29.4s, v11.4s\n" - "fmla v5.4s, v29.4s, v9.4s\n" - "fmla v16.4s, v29.4s, v13.4s\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "fmla v4.4s, v29.4s, v10.4s\n" - "fmla v19.4s, v29.4s, v14.4s\n" - "fmla v22.4s, v29.4s, v17.4s\n" - "fmla v25.4s, v29.4s, v20.4s\n" - "fmla v5.4s, v28.4s, v11.4s\n" - "ldr q21, [%[inptr0], x14]\n" - "fmla v1.4s, v28.4s, v13.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v22.4s, v28.4s, v14.4s\n" - "ldr q26, [x27, %[input_col_stride1]]\n" - "fmla v0.4s, v26.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x8]\n" - "fmla v4.4s, v21.4s, v13.4s\n" - "ldr q21, [x10, x28]\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "ldr q29, [x26, x23]\n" - "fmla v15.4s, v21.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x25]\n" - "fmla v0.4s, v21.4s, v11.4s\n" - "fmla v16.4s, v21.4s, v9.4s\n" - "fmla v18.4s, v21.4s, v12.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v23.4s, v21.4s, v17.4s\n" - "ldr q21, [x24, x12]\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "fmla v16.4s, v29.4s, v11.4s\n" - "fmla v1.4s, v29.4s, v9.4s\n" - "fmla v18.4s, v29.4s, v13.4s\n" - "fmla v19.4s, v29.4s, v12.4s\n" - "fmla v22.4s, v29.4s, v10.4s\n" - "fmla v23.4s, v29.4s, v14.4s\n" - "fmla v25.4s, v29.4s, v17.4s\n" - "fmla v24.4s, v29.4s, v20.4s\n" - "ldr q28, [x9, x14]\n" - "fmla v5.4s, v21.4s, v8.4s\n" - "ldr q27, [x27, x28]\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "add x9, x9, #16\n" - "fmla v4.4s, v21.4s, v9.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "fmla v19.4s, v21.4s, v13.4s\n" - "prfm pldl1keep, [x9, x8]\n" - "fmla v22.4s, v21.4s, v12.4s\n" - "fmla v25.4s, v21.4s, v14.4s\n" - 
"fmla v4.4s, v28.4s, v11.4s\n" - "ldr q20, [x10, x23]\n" - "fmla v0.4s, v27.4s, v8.4s\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "fmla v22.4s, v28.4s, v13.4s\n" - "ldr q26, [x26, x12]\n" - "fmla v23.4s, v27.4s, v10.4s\n" - "ldr q21, [x24, x14]\n" - "fmla v16.4s, v20.4s, v8.4s\n" - "add x24, x24, #16\n" - "fmla v18.4s, v20.4s, v11.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v19.4s, v20.4s, v9.4s\n" - "prfm pldl1keep, [x24, x8]\n" - "fmla v23.4s, v20.4s, v12.4s\n" - "fmla v25.4s, v20.4s, v10.4s\n" - "fmla v24.4s, v20.4s, v17.4s\n" - "ldr q28, [x27, x23]\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "ldr q20, [x10, x12]\n" - "fmla v19.4s, v26.4s, v11.4s\n" - "fmla v22.4s, v26.4s, v9.4s\n" - "fmla v23.4s, v26.4s, v13.4s\n" - "fmla v25.4s, v26.4s, v12.4s\n" - "fmla v24.4s, v26.4s, v14.4s\n" - "ldr q17, [x26, x14]\n" - "fmla v4.4s, v21.4s, v8.4s\n" - "ldr q26, [x27, x12]\n" - "fmla v22.4s, v21.4s, v11.4s\n" - "add x26, x26, #16\n" - "fmla v25.4s, v21.4s, v13.4s\n" - "ldr q27, [x10, x14]\n" - "fmla v18.4s, v28.4s, v8.4s\n" - "prfm pldl1keep, [x26, #64]\n" - "fmla v23.4s, v28.4s, v9.4s\n" - "add x10, x10, #16\n" - "fmla v24.4s, v28.4s, v10.4s\n" - "ldr q28, [x27, x14]\n" - "fmla v19.4s, v20.4s, v8.4s\n" - "ldr q21, [%[wbptr]]\n" - "fmla v23.4s, v20.4s, v11.4s\n" - "add x27, x27, #16\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "fmla v24.4s, v20.4s, v12.4s\n" - "fmla v22.4s, v17.4s, v8.4s\n" - "ldr q20, [%[wbptr], #16]\n" - "fmla v23.4s, v26.4s, v8.4s\n" - "ldr q14, [%[wbptr], #32]\n" - "fmla v24.4s, v17.4s, v13.4s\n" - "movi v29.16b, #0\n" - "fmla v25.4s, v17.4s, v11.4s\n" - "ldr q17, [%[wbptr], #64]\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "ldr q13, [%[wbptr], #48]\n" - "str q7, [%[outptr0]]\n" - "fmla v25.4s, v27.4s, v8.4s\n" - "str q6, [%[outptr0], %[output_col_stride1]]\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "ldr q12, [%[wbptr], #80]\n" - "str q5, [%[outptr0], x19]\n" - "fmax v4.4s, v4.4s, 
v29.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "ldr q10, [%[wbptr], #112]\n" - "str q4, [%[outptr0], x20]\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "str q3, [x16]\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "ldr q11, [%[wbptr], #96]\n" - "str q2, [x16, %[output_col_stride1]]\n" - "fmax v22.4s, v22.4s, v29.4s\n" - "str q1, [x16, x19]\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "str q22, [x16, x20]\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "str q15, [x17]\n" - "fmax v19.4s, v19.4s, v29.4s\n" - "str q16, [x17, %[output_col_stride1]]\n" - "fmax v25.4s, v25.4s, v29.4s\n" - "str q19, [x17, x19]\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "str q25, [x17, x20]\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "str q0, [x7]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "str q18, [x7, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "str q23, [x7, x19]\n" - "mov v7.16b, v21.16b\n" - "str q24, [x7, x20]\n" - "mov v3.16b, v21.16b\n" - "mov v6.16b, v21.16b\n" - "ldr q9, [%[wbptr], #128]\n" - "mov v15.16b, v21.16b\n" - "ldr q8, [%[wbptr], #144]\n" - "mov v2.16b, v21.16b\n" - "ldr q22, [%[inptr0]]\n" - "mov v5.16b, v21.16b\n" - "ldr q19, [x9]\n" - "mov v0.16b, v21.16b\n" - "ldr q23, [%[inptr0], %[input_col_stride1]]\n" - "mov v16.16b, v21.16b\n" - "ldr q18, [x24]\n" - "mov v1.16b, v21.16b\n" - "ldr q27, [x9, %[input_col_stride1]]\n" - "mov v4.16b, v21.16b\n" - "ldr q28, [%[inptr0], x28]\n" - "fmla v7.4s, v22.4s, v20.4s\n" - "ldr q25, [x26]\n" - "fmla v3.4s, v19.4s, v20.4s\n" - "ldr q22, [x24, %[input_col_stride1]]\n" - "fmla v6.4s, v23.4s, v20.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmla v7.4s, v19.4s, v17.4s\n" - "add x16, x16, #16\n" - "fmla v3.4s, v18.4s, v17.4s\n" - "add x17, x17, #16\n" - "fmla v15.4s, v18.4s, v20.4s\n" - "add x7, x7, #16\n" - "fmla v7.4s, v23.4s, v14.4s\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "fmla v7.4s, v18.4s, v10.4s\n" - "fmla v7.4s, v27.4s, v12.4s\n" - "bne 2b\n" - "3:\n" - "mov v18.16b, v21.16b\n" - "ldr q23, [x9, x28]\n" - "mov v19.16b, v21.16b\n" 
- "prfm pldl1keep, [x9, x25]\n" - "fmla v6.4s, v27.4s, v17.4s\n" - "prfm pldl1keep, [%[inptr0], x11]\n" - "fmla v2.4s, v27.4s, v20.4s\n" - "ldr q24, [%[inptr0], x23]\n" - "fmla v7.4s, v28.4s, v13.4s\n" - "prfm pldl1keep, [x10, #64]\n" - "fmla v6.4s, v28.4s, v14.4s\n" - "prfm pldl1keep, [x26, x8]\n" - "fmla v5.4s, v28.4s, v20.4s\n" - "ldr q26, [x10]\n" - "fmla v3.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x25]\n" - "fmla v15.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x9, x11]\n" - "fmla v0.4s, v25.4s, v20.4s\n" - "ldr q25, [x26, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x13]\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "prfm pldl1keep, [x27, #64]\n" - "fmla v6.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x10, x8]\n" - "fmla v15.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x26, x25]\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "prfm pldl1keep, [x24, x11]\n" - "fmla v16.4s, v22.4s, v20.4s\n" - "ldr q22, [x24, x28]\n" - "fmla v7.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [x9, x13]\n" - "fmla v3.4s, v23.4s, v13.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v6.4s, v23.4s, v12.4s\n" - "prfm pldl1keep, [x27, x8]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "prfm pldl1keep, [x10, x25]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "prfm pldl1keep, [x26, x11]\n" - "fmla v1.4s, v23.4s, v20.4s\n" - "ldr q23, [x9, x23]\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x24, x13]\n" - "fmla v5.4s, v24.4s, v14.4s\n" - "prfm pldl1keep, [x9, x15]\n" - "fmla v4.4s, v24.4s, v20.4s\n" - "ldr q24, [%[inptr0], x12]\n" - "fmla v15.4s, v26.4s, v10.4s\n" - "prfm pldl1keep, [x27, x25]\n" - "fmla v0.4s, v26.4s, v17.4s\n" - "ldr q29, [x27]\n" - "fmla v3.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x10, x11]\n" - "fmla v15.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x26, x13]\n" - "fmla v2.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "prfm pldl1keep, [x27, x11]\n" - "fmla v16.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x10, x13]\n" - 
"fmla v18.4s, v25.4s, v20.4s\n" - "ldr q26, [x10, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x26, x15]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x27, x13]\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x15]\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x27, x15]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v5.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v22.4s, v14.4s\n" - "fmla v1.4s, v22.4s, v17.4s\n" - "fmla v19.4s, v22.4s, v20.4s\n" - "ldr q27, [x26, x28]\n" - "fmla v6.4s, v23.4s, v11.4s\n" - "fmla v2.4s, v23.4s, v13.4s\n" - "fmla v5.4s, v23.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v14.4s\n" - "fmla v4.4s, v23.4s, v17.4s\n" - "fmla v0.4s, v29.4s, v10.4s\n" - "mov v22.16b, v21.16b\n" - "fmla v15.4s, v26.4s, v9.4s\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "fmla v22.4s, v23.4s, v20.4s\n" - "ldr q29, [x24, x23]\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "ldr q28, [x9, x12]\n" - "fmla v0.4s, v26.4s, v12.4s\n" - "fmla v18.4s, v26.4s, v17.4s\n" - "mov v23.16b, v21.16b\n" - "fmla v3.4s, v27.4s, v8.4s\n" - "fmla v15.4s, v27.4s, v11.4s\n" - "fmla v2.4s, v27.4s, v9.4s\n" - "fmla v0.4s, v27.4s, v13.4s\n" - "fmla v16.4s, v27.4s, v12.4s\n" - "fmla v1.4s, v27.4s, v10.4s\n" - "fmla v18.4s, v27.4s, v14.4s\n" - "fmla v19.4s, v27.4s, v17.4s\n" - "fmla v23.4s, v27.4s, v20.4s\n" - "mov v25.16b, v21.16b\n" - "mov v24.16b, v21.16b\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "fmla v2.4s, v29.4s, v11.4s\n" - "fmla v5.4s, v29.4s, v9.4s\n" - "fmla v16.4s, v29.4s, v13.4s\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "fmla v4.4s, v29.4s, v10.4s\n" - "fmla v19.4s, v29.4s, v14.4s\n" - "fmla v22.4s, v29.4s, v17.4s\n" - "fmla v25.4s, v29.4s, v20.4s\n" - "ldr q21, [%[inptr0], x14]\n" - "fmla v5.4s, v28.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v1.4s, v28.4s, v13.4s\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "fmla v22.4s, v28.4s, v14.4s\n" - 
"ldr q26, [x27, %[input_col_stride1]]\n" - "fmla v0.4s, v26.4s, v9.4s\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "fmla v4.4s, v21.4s, v13.4s\n" - "ldr q21, [x10, x28]\n" - "fmla v15.4s, v21.4s, v8.4s\n" - "ldr q29, [x26, x23]\n" - "fmla v0.4s, v21.4s, v11.4s\n" - "fmla v16.4s, v21.4s, v9.4s\n" - "fmla v18.4s, v21.4s, v12.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v23.4s, v21.4s, v17.4s\n" - "ldr q21, [x24, x12]\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "fmla v16.4s, v29.4s, v11.4s\n" - "fmla v1.4s, v29.4s, v9.4s\n" - "fmla v18.4s, v29.4s, v13.4s\n" - "fmla v19.4s, v29.4s, v12.4s\n" - "fmla v22.4s, v29.4s, v10.4s\n" - "fmla v23.4s, v29.4s, v14.4s\n" - "fmla v25.4s, v29.4s, v17.4s\n" - "fmla v24.4s, v29.4s, v20.4s\n" - "ldr q28, [x9, x14]\n" - "fmla v5.4s, v21.4s, v8.4s\n" - "ldr q27, [x27, x28]\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "add x9, x9, #16\n" - "fmla v4.4s, v21.4s, v9.4s\n" - "fmla v19.4s, v21.4s, v13.4s\n" - "fmla v22.4s, v21.4s, v12.4s\n" - "fmla v25.4s, v21.4s, v14.4s\n" - "fmla v0.4s, v27.4s, v8.4s\n" - "ldr q20, [x10, x23]\n" - "fmla v4.4s, v28.4s, v11.4s\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "fmla v22.4s, v28.4s, v13.4s\n" - "ldr q26, [x26, x12]\n" - "fmla v23.4s, v27.4s, v10.4s\n" - "ldr q21, [x24, x14]\n" - "fmla v16.4s, v20.4s, v8.4s\n" - "add x24, x24, #16\n" - "fmla v18.4s, v20.4s, v11.4s\n" - "fmla v19.4s, v20.4s, v9.4s\n" - "fmla v23.4s, v20.4s, v12.4s\n" - "fmla v25.4s, v20.4s, v10.4s\n" - "fmla v24.4s, v20.4s, v17.4s\n" - "ldr q28, [x27, x23]\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "ldr q20, [x10, x12]\n" - "fmla v19.4s, v26.4s, v11.4s\n" - "fmla v22.4s, v26.4s, v9.4s\n" - "fmla v23.4s, v26.4s, v13.4s\n" - "fmla v25.4s, v26.4s, v12.4s\n" - "fmla v24.4s, v26.4s, v14.4s\n" - "ldr q17, [x26, x14]\n" - "fmla v4.4s, v21.4s, v8.4s\n" - "ldr q26, [x27, x12]\n" - "fmla v22.4s, v21.4s, v11.4s\n" - "add x26, x26, #16\n" - "fmla v25.4s, v21.4s, v13.4s\n" - "ldr q27, [x10, x14]\n" - "fmla v18.4s, v28.4s, v8.4s\n" - "add x10, x10, #16\n" - "fmla v23.4s, v28.4s, 
v9.4s\n" - "fmla v24.4s, v28.4s, v10.4s\n" - "fmla v19.4s, v20.4s, v8.4s\n" - "ldr q28, [x27, x14]\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "add x27, x27, #16\n" - "fmla v23.4s, v20.4s, v11.4s\n" - "fmla v24.4s, v20.4s, v12.4s\n" - "fmla v22.4s, v17.4s, v8.4s\n" - "movi v29.16b, #0\n" - "fmla v25.4s, v17.4s, v11.4s\n" - "fmla v24.4s, v17.4s, v13.4s\n" - "fmla v23.4s, v26.4s, v8.4s\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmla v25.4s, v27.4s, v8.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "str q7, [%[outptr0]]\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "str q6, [%[outptr0], %[output_col_stride1]]\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "str q5, [%[outptr0], x19]\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "str q4, [%[outptr0], x20]\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "str q3, [x16]\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "str q2, [x16, %[output_col_stride1]]\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "str q1, [x16, x19]\n" - "fmax v22.4s, v22.4s, v29.4s\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "str q22, [x16, x20]\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "str q15, [x17]\n" - "fmax v19.4s, v19.4s, v29.4s\n" - "str q16, [x17, %[output_col_stride1]]\n" - "fmax v25.4s, v25.4s, v29.4s\n" - "str q19, [x17, x19]\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "str q25, [x17, x20]\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "str q0, [x7]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "str q18, [x7, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "str q23, [x7, x19]\n" - "add x16, x16, #16\n" - "str q24, [x7, x20]\n" - "add x17, x17, #16\n" - "add x7, x7, #16\n" - "4:\n" - "cbz x21, 7f\n" - "ldr s21, [%[wbptr]]\n" - "mov v7.16b, v21.16b\n" - "ldr s20, [%[wbptr], #4]\n" - "mov v3.16b, v21.16b\n" - "ldr s14, [%[wbptr], #8]\n" - "mov v6.16b, v21.16b\n" - "ldr s13, [%[wbptr], #12]\n" - "mov v15.16b, v21.16b\n" - "ldr s17, [%[wbptr], #16]\n" - "mov v2.16b, v21.16b\n" - "ldr s12, [%[wbptr], #20]\n" - "mov v5.16b, v21.16b\n" 
- "ldr s11, [%[wbptr], #24]\n" - "mov v0.16b, v21.16b\n" - "ldr s10, [%[wbptr], #28]\n" - "mov v16.16b, v21.16b\n" - "ldr s9, [%[wbptr], #32]\n" - "mov v1.16b, v21.16b\n" - "ldr s8, [%[wbptr], #36]\n" - "mov v4.16b, v21.16b\n" - "ldr s22, [%[inptr0]]\n" - "fmla v7.4s, v22.4s, v20.4s\n" - "ldr s19, [x9]\n" - "fmla v3.4s, v19.4s, v20.4s\n" - "ldr s23, [%[inptr0], %[input_col_stride1]]\n" - "fmla v6.4s, v23.4s, v20.4s\n" - "ldr s18, [x24]\n" - "fmla v7.4s, v19.4s, v17.4s\n" - "ldr s27, [x9, %[input_col_stride1]]\n" - "fmla v3.4s, v18.4s, v17.4s\n" - "ldr s28, [%[inptr0], x28]\n" - "fmla v15.4s, v18.4s, v20.4s\n" - "ldr s25, [x26]\n" - "fmla v7.4s, v23.4s, v14.4s\n" - "ldr s22, [x24, %[input_col_stride1]]\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x9, #64]\n" - "subs x21, x21, #1\n" - "prfm pldl1keep, [%[inptr0], x8]\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v7.4s, v18.4s, v10.4s\n" - "prfm pldl1keep, [x9, x8]\n" - "prfm pldl1keep, [%[inptr0], x25]\n" - "prfm pldl1keep, [x26, #64]\n" - "prfm pldl1keep, [x24, x8]\n" - "fmla v7.4s, v27.4s, v12.4s\n" - "beq 6f\n" - "5:\n" - "mov v18.16b, v21.16b\n" - "ldr s23, [x9, x28]\n" - "mov v19.16b, v21.16b\n" - "prfm pldl1keep, [x9, x25]\n" - "fmla v6.4s, v27.4s, v17.4s\n" - "prfm pldl1keep, [%[inptr0], x11]\n" - "fmla v2.4s, v27.4s, v20.4s\n" - "ldr s24, [%[inptr0], x23]\n" - "fmla v7.4s, v28.4s, v13.4s\n" - "prfm pldl1keep, [x10, #64]\n" - "fmla v6.4s, v28.4s, v14.4s\n" - "prfm pldl1keep, [x26, x8]\n" - "fmla v5.4s, v28.4s, v20.4s\n" - "ldr s26, [x10]\n" - "fmla v3.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x25]\n" - "fmla v15.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x9, x11]\n" - "fmla v0.4s, v25.4s, v20.4s\n" - "ldr s25, [x26, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x13]\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "prfm pldl1keep, [x27, #64]\n" - "fmla v6.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x10, x8]\n" - "fmla v15.4s, 
v22.4s, v14.4s\n" - "prfm pldl1keep, [x26, x25]\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "prfm pldl1keep, [x24, x11]\n" - "fmla v16.4s, v22.4s, v20.4s\n" - "ldr s22, [x24, x28]\n" - "fmla v7.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [x9, x13]\n" - "fmla v3.4s, v23.4s, v13.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v6.4s, v23.4s, v12.4s\n" - "prfm pldl1keep, [x27, x8]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "prfm pldl1keep, [x10, x25]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "prfm pldl1keep, [x26, x11]\n" - "fmla v1.4s, v23.4s, v20.4s\n" - "ldr s23, [x9, x23]\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x24, x13]\n" - "fmla v5.4s, v24.4s, v14.4s\n" - "prfm pldl1keep, [x9, x15]\n" - "fmla v4.4s, v24.4s, v20.4s\n" - "ldr s24, [%[inptr0], x12]\n" - "fmla v15.4s, v26.4s, v10.4s\n" - "prfm pldl1keep, [x27, x25]\n" - "fmla v0.4s, v26.4s, v17.4s\n" - "ldr s29, [x27]\n" - "fmla v3.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x10, x11]\n" - "fmla v15.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x26, x13]\n" - "fmla v2.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "prfm pldl1keep, [x27, x11]\n" - "fmla v16.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x10, x13]\n" - "fmla v18.4s, v25.4s, v20.4s\n" - "ldr s26, [x10, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x26, x15]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x27, x13]\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x15]\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x27, x15]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v5.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v22.4s, v14.4s\n" - "subs x21, x21, #1\n" - "fmla v1.4s, v22.4s, v17.4s\n" - "fmla v19.4s, v22.4s, v20.4s\n" - "mov v22.16b, v21.16b\n" - "fmla v6.4s, v23.4s, v11.4s\n" - "fmla v2.4s, v23.4s, v13.4s\n" - "fmla v5.4s, v23.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v14.4s\n" - "fmla v4.4s, 
v23.4s, v17.4s\n" - "fmla v22.4s, v23.4s, v20.4s\n" - "ldr s27, [x26, x28]\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "fmla v0.4s, v29.4s, v10.4s\n" - "mov v23.16b, v21.16b\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "mov v25.16b, v21.16b\n" - "mov v24.16b, v21.16b\n" - "fmla v15.4s, v26.4s, v9.4s\n" - "fmla v0.4s, v26.4s, v12.4s\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "fmla v18.4s, v26.4s, v17.4s\n" - "fmla v3.4s, v27.4s, v8.4s\n" - "ldr s29, [x24, x23]\n" - "fmla v15.4s, v27.4s, v11.4s\n" - "fmla v2.4s, v27.4s, v9.4s\n" - "fmla v0.4s, v27.4s, v13.4s\n" - "fmla v16.4s, v27.4s, v12.4s\n" - "fmla v1.4s, v27.4s, v10.4s\n" - "fmla v18.4s, v27.4s, v14.4s\n" - "fmla v19.4s, v27.4s, v17.4s\n" - "fmla v23.4s, v27.4s, v20.4s\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "ldr s28, [x9, x12]\n" - "fmla v2.4s, v29.4s, v11.4s\n" - "fmla v5.4s, v29.4s, v9.4s\n" - "fmla v16.4s, v29.4s, v13.4s\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "fmla v4.4s, v29.4s, v10.4s\n" - "fmla v19.4s, v29.4s, v14.4s\n" - "fmla v22.4s, v29.4s, v17.4s\n" - "fmla v25.4s, v29.4s, v20.4s\n" - "fmla v5.4s, v28.4s, v11.4s\n" - "ldr s21, [%[inptr0], x14]\n" - "fmla v1.4s, v28.4s, v13.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v22.4s, v28.4s, v14.4s\n" - "ldr s26, [x27, %[input_col_stride1]]\n" - "fmla v0.4s, v26.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x8]\n" - "fmla v4.4s, v21.4s, v13.4s\n" - "ldr s21, [x10, x28]\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "ldr s29, [x26, x23]\n" - "fmla v15.4s, v21.4s, v8.4s\n" - "prfm pldl1keep, [%[inptr0], x25]\n" - "fmla v0.4s, v21.4s, v11.4s\n" - "fmla v16.4s, v21.4s, v9.4s\n" - "fmla v18.4s, v21.4s, v12.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v23.4s, v21.4s, v17.4s\n" - "ldr s21, [x24, x12]\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "fmla v16.4s, v29.4s, v11.4s\n" - "fmla v1.4s, v29.4s, v9.4s\n" - "fmla v18.4s, v29.4s, v13.4s\n" - "fmla v19.4s, v29.4s, v12.4s\n" - "fmla v22.4s, v29.4s, v10.4s\n" - "fmla v23.4s, 
v29.4s, v14.4s\n" - "fmla v25.4s, v29.4s, v17.4s\n" - "fmla v24.4s, v29.4s, v20.4s\n" - "ldr s28, [x9, x14]\n" - "fmla v5.4s, v21.4s, v8.4s\n" - "ldr s27, [x27, x28]\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "add x9, x9, #4\n" - "fmla v4.4s, v21.4s, v9.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "fmla v19.4s, v21.4s, v13.4s\n" - "prfm pldl1keep, [x9, x8]\n" - "fmla v22.4s, v21.4s, v12.4s\n" - "fmla v25.4s, v21.4s, v14.4s\n" - "fmla v4.4s, v28.4s, v11.4s\n" - "ldr s20, [x10, x23]\n" - "fmla v0.4s, v27.4s, v8.4s\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "fmla v22.4s, v28.4s, v13.4s\n" - "ldr s26, [x26, x12]\n" - "fmla v23.4s, v27.4s, v10.4s\n" - "ldr s21, [x24, x14]\n" - "fmla v16.4s, v20.4s, v8.4s\n" - "add x24, x24, #4\n" - "fmla v18.4s, v20.4s, v11.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v19.4s, v20.4s, v9.4s\n" - "prfm pldl1keep, [x24, x8]\n" - "fmla v23.4s, v20.4s, v12.4s\n" - "fmla v25.4s, v20.4s, v10.4s\n" - "fmla v24.4s, v20.4s, v17.4s\n" - "ldr s28, [x27, x23]\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "ldr s20, [x10, x12]\n" - "fmla v19.4s, v26.4s, v11.4s\n" - "fmla v22.4s, v26.4s, v9.4s\n" - "fmla v23.4s, v26.4s, v13.4s\n" - "fmla v25.4s, v26.4s, v12.4s\n" - "fmla v24.4s, v26.4s, v14.4s\n" - "ldr s17, [x26, x14]\n" - "fmla v4.4s, v21.4s, v8.4s\n" - "ldr s26, [x27, x12]\n" - "fmla v22.4s, v21.4s, v11.4s\n" - "add x26, x26, #4\n" - "fmla v25.4s, v21.4s, v13.4s\n" - "ldr s27, [x10, x14]\n" - "fmla v18.4s, v28.4s, v8.4s\n" - "prfm pldl1keep, [x26, #64]\n" - "fmla v23.4s, v28.4s, v9.4s\n" - "add x10, x10, #4\n" - "fmla v24.4s, v28.4s, v10.4s\n" - "ldr s28, [x27, x14]\n" - "fmla v19.4s, v20.4s, v8.4s\n" - "ldr s21, [%[wbptr]]\n" - "fmla v23.4s, v20.4s, v11.4s\n" - "add x27, x27, #4\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "fmla v24.4s, v20.4s, v12.4s\n" - "fmla v22.4s, v17.4s, v8.4s\n" - "ldr s20, [%[wbptr], #4]\n" - "fmla v23.4s, v26.4s, v8.4s\n" - "ldr s14, [%[wbptr], #8]\n" - "fmla v24.4s, v17.4s, v13.4s\n" - "movi v29.16b, #0\n" - "fmla v25.4s, v17.4s, v11.4s\n" - "ldr 
s17, [%[wbptr], #16]\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "ldr s13, [%[wbptr], #12]\n" - "str s7, [%[outptr0]]\n" - "fmla v25.4s, v27.4s, v8.4s\n" - "str s6, [%[outptr0], %[output_col_stride1]]\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "ldr s12, [%[wbptr], #20]\n" - "str s5, [%[outptr0], x19]\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "ldr s10, [%[wbptr], #28]\n" - "str s4, [%[outptr0], x20]\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "str s3, [x16]\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "ldr s11, [%[wbptr], #24]\n" - "str s2, [x16, %[output_col_stride1]]\n" - "fmax v22.4s, v22.4s, v29.4s\n" - "str s1, [x16, x19]\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "str s22, [x16, x20]\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "str s15, [x17]\n" - "fmax v19.4s, v19.4s, v29.4s\n" - "str s16, [x17, %[output_col_stride1]]\n" - "fmax v25.4s, v25.4s, v29.4s\n" - "str s19, [x17, x19]\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "str s25, [x17, x20]\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "str s0, [x7]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "str s18, [x7, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "str s23, [x7, x19]\n" - "mov v7.16b, v21.16b\n" - "str s24, [x7, x20]\n" - "mov v3.16b, v21.16b\n" - "mov v6.16b, v21.16b\n" - "ldr s9, [%[wbptr], #32]\n" - "mov v15.16b, v21.16b\n" - "ldr s8, [%[wbptr], #36]\n" - "mov v2.16b, v21.16b\n" - "ldr s22, [%[inptr0]]\n" - "mov v5.16b, v21.16b\n" - "ldr s19, [x9]\n" - "mov v0.16b, v21.16b\n" - "ldr s23, [%[inptr0], %[input_col_stride1]]\n" - "mov v16.16b, v21.16b\n" - "ldr s18, [x24]\n" - "mov v1.16b, v21.16b\n" - "ldr s27, [x9, %[input_col_stride1]]\n" - "mov v4.16b, v21.16b\n" - "ldr s28, [%[inptr0], x28]\n" - "fmla v7.4s, v22.4s, v20.4s\n" - "ldr s25, [x26]\n" - "fmla v3.4s, v19.4s, v20.4s\n" - "ldr s22, [x24, %[input_col_stride1]]\n" - "fmla v6.4s, v23.4s, v20.4s\n" - "add %[outptr0], %[outptr0], 
#4\n" - "fmla v7.4s, v19.4s, v17.4s\n" - "add x16, x16, #4\n" - "fmla v3.4s, v18.4s, v17.4s\n" - "add x17, x17, #4\n" - "fmla v15.4s, v18.4s, v20.4s\n" - "add x7, x7, #4\n" - "fmla v7.4s, v23.4s, v14.4s\n" - "fmla v3.4s, v27.4s, v14.4s\n" - "fmla v7.4s, v18.4s, v10.4s\n" - "fmla v7.4s, v27.4s, v12.4s\n" - "bne 5b\n" - "6:\n" - "mov v18.16b, v21.16b\n" - "ldr s23, [x9, x28]\n" - "mov v19.16b, v21.16b\n" - "prfm pldl1keep, [x9, x25]\n" - "fmla v6.4s, v27.4s, v17.4s\n" - "prfm pldl1keep, [%[inptr0], x11]\n" - "fmla v2.4s, v27.4s, v20.4s\n" - "ldr s24, [%[inptr0], x23]\n" - "fmla v7.4s, v28.4s, v13.4s\n" - "prfm pldl1keep, [x10, #64]\n" - "fmla v6.4s, v28.4s, v14.4s\n" - "prfm pldl1keep, [x26, x8]\n" - "fmla v5.4s, v28.4s, v20.4s\n" - "ldr s26, [x10]\n" - "fmla v3.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x25]\n" - "fmla v15.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x9, x11]\n" - "fmla v0.4s, v25.4s, v20.4s\n" - "ldr s25, [x26, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [%[inptr0], x13]\n" - "fmla v3.4s, v22.4s, v12.4s\n" - "prfm pldl1keep, [x27, #64]\n" - "fmla v6.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [x10, x8]\n" - "fmla v15.4s, v22.4s, v14.4s\n" - "prfm pldl1keep, [x26, x25]\n" - "fmla v2.4s, v22.4s, v17.4s\n" - "prfm pldl1keep, [x24, x11]\n" - "fmla v16.4s, v22.4s, v20.4s\n" - "ldr s22, [x24, x28]\n" - "fmla v7.4s, v23.4s, v11.4s\n" - "prfm pldl1keep, [x9, x13]\n" - "fmla v3.4s, v23.4s, v13.4s\n" - "prfm pldl1keep, [%[inptr0], x15]\n" - "fmla v6.4s, v23.4s, v12.4s\n" - "prfm pldl1keep, [x27, x8]\n" - "fmla v2.4s, v23.4s, v14.4s\n" - "prfm pldl1keep, [x10, x25]\n" - "fmla v5.4s, v23.4s, v17.4s\n" - "prfm pldl1keep, [x26, x11]\n" - "fmla v1.4s, v23.4s, v20.4s\n" - "ldr s23, [x9, x23]\n" - "fmla v6.4s, v24.4s, v13.4s\n" - "prfm pldl1keep, [x24, x13]\n" - "fmla v5.4s, v24.4s, v14.4s\n" - "prfm pldl1keep, [x9, x15]\n" - "fmla v4.4s, v24.4s, v20.4s\n" - "ldr s24, [%[inptr0], x12]\n" - "fmla v15.4s, v26.4s, v10.4s\n" - "prfm 
pldl1keep, [x27, x25]\n" - "fmla v0.4s, v26.4s, v17.4s\n" - "ldr s29, [x27]\n" - "fmla v3.4s, v25.4s, v9.4s\n" - "prfm pldl1keep, [x10, x11]\n" - "fmla v15.4s, v25.4s, v12.4s\n" - "prfm pldl1keep, [x26, x13]\n" - "fmla v2.4s, v25.4s, v10.4s\n" - "prfm pldl1keep, [x24, x15]\n" - "fmla v0.4s, v25.4s, v14.4s\n" - "prfm pldl1keep, [x27, x11]\n" - "fmla v16.4s, v25.4s, v17.4s\n" - "prfm pldl1keep, [x10, x13]\n" - "fmla v18.4s, v25.4s, v20.4s\n" - "ldr s26, [x10, %[input_col_stride1]]\n" - "fmla v7.4s, v22.4s, v8.4s\n" - "prfm pldl1keep, [x26, x15]\n" - "fmla v3.4s, v22.4s, v11.4s\n" - "prfm pldl1keep, [x27, x13]\n" - "fmla v6.4s, v22.4s, v9.4s\n" - "prfm pldl1keep, [x10, x15]\n" - "fmla v15.4s, v22.4s, v13.4s\n" - "prfm pldl1keep, [x27, x15]\n" - "fmla v2.4s, v22.4s, v12.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v5.4s, v22.4s, v10.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v22.4s, v14.4s\n" - "fmla v1.4s, v22.4s, v17.4s\n" - "fmla v19.4s, v22.4s, v20.4s\n" - "ldr s27, [x26, x28]\n" - "fmla v6.4s, v23.4s, v11.4s\n" - "fmla v2.4s, v23.4s, v13.4s\n" - "fmla v5.4s, v23.4s, v12.4s\n" - "fmla v1.4s, v23.4s, v14.4s\n" - "fmla v4.4s, v23.4s, v17.4s\n" - "fmla v0.4s, v29.4s, v10.4s\n" - "mov v22.16b, v21.16b\n" - "fmla v15.4s, v26.4s, v9.4s\n" - "fmla v5.4s, v24.4s, v13.4s\n" - "fmla v16.4s, v26.4s, v10.4s\n" - "fmla v22.4s, v23.4s, v20.4s\n" - "ldr s29, [x24, x23]\n" - "fmla v4.4s, v24.4s, v14.4s\n" - "ldr s28, [x9, x12]\n" - "fmla v0.4s, v26.4s, v12.4s\n" - "fmla v18.4s, v26.4s, v17.4s\n" - "mov v23.16b, v21.16b\n" - "fmla v3.4s, v27.4s, v8.4s\n" - "fmla v15.4s, v27.4s, v11.4s\n" - "fmla v2.4s, v27.4s, v9.4s\n" - "fmla v0.4s, v27.4s, v13.4s\n" - "fmla v16.4s, v27.4s, v12.4s\n" - "fmla v1.4s, v27.4s, v10.4s\n" - "fmla v18.4s, v27.4s, v14.4s\n" - "fmla v19.4s, v27.4s, v17.4s\n" - "fmla v23.4s, v27.4s, v20.4s\n" - "mov v25.16b, v21.16b\n" - "mov v24.16b, v21.16b\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "fmla v2.4s, v29.4s, v11.4s\n" - "fmla v5.4s, v29.4s, 
v9.4s\n" - "fmla v16.4s, v29.4s, v13.4s\n" - "fmla v1.4s, v29.4s, v12.4s\n" - "fmla v4.4s, v29.4s, v10.4s\n" - "fmla v19.4s, v29.4s, v14.4s\n" - "fmla v22.4s, v29.4s, v17.4s\n" - "fmla v25.4s, v29.4s, v20.4s\n" - "ldr s21, [%[inptr0], x14]\n" - "fmla v5.4s, v28.4s, v11.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v1.4s, v28.4s, v13.4s\n" - "fmla v4.4s, v28.4s, v12.4s\n" - "fmla v22.4s, v28.4s, v14.4s\n" - "ldr s26, [x27, %[input_col_stride1]]\n" - "fmla v0.4s, v26.4s, v9.4s\n" - "fmla v18.4s, v26.4s, v10.4s\n" - "fmla v4.4s, v21.4s, v13.4s\n" - "ldr s21, [x10, x28]\n" - "fmla v15.4s, v21.4s, v8.4s\n" - "ldr s29, [x26, x23]\n" - "fmla v0.4s, v21.4s, v11.4s\n" - "fmla v16.4s, v21.4s, v9.4s\n" - "fmla v18.4s, v21.4s, v12.4s\n" - "fmla v19.4s, v21.4s, v10.4s\n" - "fmla v23.4s, v21.4s, v17.4s\n" - "ldr s21, [x24, x12]\n" - "fmla v2.4s, v29.4s, v8.4s\n" - "fmla v16.4s, v29.4s, v11.4s\n" - "fmla v1.4s, v29.4s, v9.4s\n" - "fmla v18.4s, v29.4s, v13.4s\n" - "fmla v19.4s, v29.4s, v12.4s\n" - "fmla v22.4s, v29.4s, v10.4s\n" - "fmla v23.4s, v29.4s, v14.4s\n" - "fmla v25.4s, v29.4s, v17.4s\n" - "fmla v24.4s, v29.4s, v20.4s\n" - "ldr s28, [x9, x14]\n" - "fmla v5.4s, v21.4s, v8.4s\n" - "ldr s27, [x27, x28]\n" - "fmla v1.4s, v21.4s, v11.4s\n" - "add x9, x9, #4\n" - "fmla v4.4s, v21.4s, v9.4s\n" - "fmla v19.4s, v21.4s, v13.4s\n" - "fmla v22.4s, v21.4s, v12.4s\n" - "fmla v25.4s, v21.4s, v14.4s\n" - "fmla v0.4s, v27.4s, v8.4s\n" - "ldr s20, [x10, x23]\n" - "fmla v4.4s, v28.4s, v11.4s\n" - "fmla v18.4s, v27.4s, v9.4s\n" - "fmla v22.4s, v28.4s, v13.4s\n" - "ldr s26, [x26, x12]\n" - "fmla v23.4s, v27.4s, v10.4s\n" - "ldr s21, [x24, x14]\n" - "fmla v16.4s, v20.4s, v8.4s\n" - "add x24, x24, #4\n" - "fmla v18.4s, v20.4s, v11.4s\n" - "fmla v19.4s, v20.4s, v9.4s\n" - "fmla v23.4s, v20.4s, v12.4s\n" - "fmla v25.4s, v20.4s, v10.4s\n" - "fmla v24.4s, v20.4s, v17.4s\n" - "ldr s28, [x27, x23]\n" - "fmla v1.4s, v26.4s, v8.4s\n" - "ldr s20, [x10, x12]\n" - "fmla v19.4s, v26.4s, v11.4s\n" - "fmla 
v22.4s, v26.4s, v9.4s\n" - "fmla v23.4s, v26.4s, v13.4s\n" - "fmla v25.4s, v26.4s, v12.4s\n" - "fmla v24.4s, v26.4s, v14.4s\n" - "ldr s17, [x26, x14]\n" - "fmla v4.4s, v21.4s, v8.4s\n" - "ldr s26, [x27, x12]\n" - "fmla v22.4s, v21.4s, v11.4s\n" - "add x26, x26, #4\n" - "fmla v25.4s, v21.4s, v13.4s\n" - "ldr s27, [x10, x14]\n" - "fmla v18.4s, v28.4s, v8.4s\n" - "add x10, x10, #4\n" - "fmla v23.4s, v28.4s, v9.4s\n" - "fmla v24.4s, v28.4s, v10.4s\n" - "fmla v19.4s, v20.4s, v8.4s\n" - "ldr s28, [x27, x14]\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "add x27, x27, #4\n" - "fmla v23.4s, v20.4s, v11.4s\n" - "fmla v24.4s, v20.4s, v12.4s\n" - "fmla v22.4s, v17.4s, v8.4s\n" - "movi v29.16b, #0\n" - "fmla v25.4s, v17.4s, v11.4s\n" - "fmla v24.4s, v17.4s, v13.4s\n" - "fmla v23.4s, v26.4s, v8.4s\n" - "fmax v7.4s, v7.4s, v29.4s\n" - "fmla v25.4s, v27.4s, v8.4s\n" - "fmax v6.4s, v6.4s, v29.4s\n" - "str s7, [%[outptr0]]\n" - "fmla v24.4s, v26.4s, v9.4s\n" - "str s6, [%[outptr0], %[output_col_stride1]]\n" - "fmax v5.4s, v5.4s, v29.4s\n" - "fmax v4.4s, v4.4s, v29.4s\n" - "fmax v3.4s, v3.4s, v29.4s\n" - "str s5, [%[outptr0], x19]\n" - "fmla v24.4s, v27.4s, v11.4s\n" - "str s4, [%[outptr0], x20]\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "str s3, [x16]\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "str s2, [x16, %[output_col_stride1]]\n" - "fmla v24.4s, v28.4s, v8.4s\n" - "str s1, [x16, x19]\n" - "fmax v22.4s, v22.4s, v29.4s\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "str s22, [x16, x20]\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "str s15, [x17]\n" - "fmax v19.4s, v19.4s, v29.4s\n" - "str s16, [x17, %[output_col_stride1]]\n" - "fmax v25.4s, v25.4s, v29.4s\n" - "str s19, [x17, x19]\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "str s25, [x17, x20]\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "str s0, [x7]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "str s18, [x7, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "str s23, [x7, x19]\n" - "add x16, x16, #4\n" - "str s24, [x7, x20]\n" - 
"add x17, x17, #4\n" - "add x7, x7, #4\n" - "7:\n" - : [wbptr] "+r" (weight_bias_ptr), [outptr0] "+r" (output), [inptr0] "+r" (input) - : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [input_col_stride1] "r" (input_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x7", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x8", "x9", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU>( - int n_channels, - const void *weight_bias_ptr, - const float *inptrs[6][6], - float *outptrs[4][4] -) -{ - __asm __volatile( - "mov x27, xzr\n" - "mov x28, xzr\n" - "and x19, %[n_channels], #3\n" - "lsr x26, %[n_channels], #2\n" - "cbz x26, 4f\n" - "1:\n" - "ldr q25, [%[wbptr]]\n" - "ldr x25, [%[inptrs], 0]\n" - "mov v2.16b, v25.16b\n" - "ldr q22, [%[wbptr], #16]\n" - "mov v16.16b, v25.16b\n" - "ldr q9, [%[wbptr], #32]\n" - "mov v18.16b, v25.16b\n" - "ldr q8, [%[wbptr], #48]\n" - "mov v13.16b, v25.16b\n" - "ldr q19, [%[wbptr], #64]\n" - "mov v0.16b, v25.16b\n" - "ldr q7, [%[wbptr], #80]\n" - "mov v17.16b, v25.16b\n" - "ldr q6, [%[wbptr], #96]\n" - "mov v14.16b, v25.16b\n" - "ldr q5, [%[wbptr], #112]\n" - "mov v12.16b, v25.16b\n" - "ldr q4, [%[wbptr], #128]\n" - "mov v15.16b, v25.16b\n" - "ldr q3, [%[wbptr], #144]\n" - "ldr q27, [x25, x27]\n" - "ldr x17, [%[inptrs], 48]\n" - "fmla v2.4s, v27.4s, v22.4s\n" - "ldr x25, [%[inptrs], 8]\n" - "ldr q26, [x17, x27]\n" - "ldr x24, [%[inptrs], 96]\n" - "fmla v16.4s, v26.4s, v22.4s\n" - "ldr q31, [x25, x27]\n" - "ldr q28, [x24, x27]\n" - "ldr x17, [%[inptrs], 56]\n" - "fmla 
v2.4s, v26.4s, v19.4s\n" - "ldr x25, [%[inptrs], 16]\n" - "ldr q29, [x17, x27]\n" - "ldr x7, [%[inptrs], 144]\n" - "ldr x24, [%[inptrs], 104]\n" - "subs x26, x26, #1\n" - "ldr q30, [x25, x27]\n" - "ldr q27, [x7, x27]\n" - "ldr q21, [x24, x27]\n" - "fmla v2.4s, v31.4s, v9.4s\n" - "beq 3f\n" - "2:\n" - "mov v1.16b, v25.16b\n" - "ldr x17, [%[inptrs], 64]\n" - "mov v10.16b, v25.16b\n" - "ldr x25, [%[inptrs], 24]\n" - "fmla v18.4s, v31.4s, v22.4s\n" - "ldr q23, [x17, x27]\n" - "fmla v2.4s, v28.4s, v5.4s\n" - "ldr x15, [%[inptrs], 192]\n" - "fmla v16.4s, v28.4s, v19.4s\n" - "ldr x7, [%[inptrs], 152]\n" - "fmla v13.4s, v28.4s, v22.4s\n" - "ldr q26, [x25, x27]\n" - "fmla v18.4s, v29.4s, v19.4s\n" - "ldr x24, [%[inptrs], 112]\n" - "fmla v2.4s, v29.4s, v7.4s\n" - "ldr x17, [%[inptrs], 72]\n" - "fmla v16.4s, v29.4s, v9.4s\n" - "ldr x25, [%[inptrs], 32]\n" - "fmla v0.4s, v29.4s, v22.4s\n" - "ldr q28, [x15, x27]\n" - "fmla v18.4s, v30.4s, v9.4s\n" - "ldr x16, [%[inptrs], 240]\n" - "fmla v2.4s, v30.4s, v8.4s\n" - "ldr x15, [%[inptrs], 200]\n" - "fmla v17.4s, v30.4s, v22.4s\n" - "ldr q29, [x7, x27]\n" - "fmla v16.4s, v27.4s, v5.4s\n" - "ldr x7, [%[inptrs], 160]\n" - "fmla v13.4s, v27.4s, v19.4s\n" - "ldr x20, [%[outptrs], 0]\n" - "fmla v14.4s, v27.4s, v22.4s\n" - "ldr q20, [x24, x27]\n" - "fmla v2.4s, v21.4s, v4.4s\n" - "ldr x24, [%[inptrs], 120]\n" - "fmla v16.4s, v21.4s, v7.4s\n" - "ldr x21, [%[outptrs], 32]\n" - "fmla v18.4s, v21.4s, v5.4s\n" - "ldr x22, [%[outptrs], 64]\n" - "fmla v13.4s, v21.4s, v9.4s\n" - "ldr x23, [%[outptrs], 96]\n" - "fmla v0.4s, v21.4s, v19.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v12.4s, v21.4s, v22.4s\n" - "ldr q24, [x17, x27]\n" - "fmla v2.4s, v23.4s, v6.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v23.4s, v8.4s\n" - "ldr x17, [%[inptrs], 80]\n" - "fmla v18.4s, v23.4s, v7.4s\n" - "subs x26, x26, #1\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "fmla v17.4s, v23.4s, v19.4s\n" - "fmla v15.4s, v23.4s, v22.4s\n" - "ldr q23, [x25, x27]\n" - 
"fmla v1.4s, v26.4s, v22.4s\n" - "ldr x25, [%[inptrs], 40]\n" - "fmla v18.4s, v26.4s, v8.4s\n" - "fmla v13.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v26.4s, v9.4s\n" - "ldr q30, [x16, x27]\n" - "fmla v14.4s, v28.4s, v19.4s\n" - "ldr q26, [x15, x27]\n" - "fmla v16.4s, v29.4s, v4.4s\n" - "ldr x16, [%[inptrs], 248]\n" - "fmla v13.4s, v29.4s, v7.4s\n" - "ldr x15, [%[inptrs], 208]\n" - "fmla v0.4s, v29.4s, v5.4s\n" - "fmla v12.4s, v29.4s, v19.4s\n" - "fmla v14.4s, v29.4s, v9.4s\n" - "fmla v10.4s, v29.4s, v22.4s\n" - "mov v11.16b, v25.16b\n" - "fmla v2.4s, v20.4s, v3.4s\n" - "fmla v16.4s, v20.4s, v6.4s\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "fmla v13.4s, v20.4s, v8.4s\n" - "fmla v0.4s, v20.4s, v7.4s\n" - "fmla v17.4s, v20.4s, v5.4s\n" - "fmla v12.4s, v20.4s, v9.4s\n" - "fmla v15.4s, v20.4s, v19.4s\n" - "fmla v11.4s, v20.4s, v22.4s\n" - "mov v21.16b, v25.16b\n" - "fmla v18.4s, v24.4s, v6.4s\n" - "fmla v0.4s, v24.4s, v8.4s\n" - "fmla v1.4s, v24.4s, v19.4s\n" - "fmla v17.4s, v24.4s, v7.4s\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "mov v20.16b, v25.16b\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "fmla v21.4s, v24.4s, v22.4s\n" - "ldr q27, [x7, x27]\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "ldr x7, [%[inptrs], 168]\n" - "fmla v17.4s, v23.4s, v8.4s\n" - "ldr q30, [x24, x27]\n" - "fmla v13.4s, v26.4s, v4.4s\n" - "ldr x24, [%[inptrs], 128]\n" - "fmla v14.4s, v26.4s, v7.4s\n" - "fmla v12.4s, v26.4s, v5.4s\n" - "fmla v10.4s, v26.4s, v19.4s\n" - "ldr q31, [x17, x27]\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "ldr x17, [%[inptrs], 88]\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v0.4s, v27.4s, v4.4s\n" - "fmla v14.4s, v27.4s, v8.4s\n" - "fmla v12.4s, v27.4s, v7.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "fmla v10.4s, v27.4s, v9.4s\n" - "fmla v11.4s, v27.4s, v19.4s\n" - "fmla v20.4s, v27.4s, v22.4s\n" - "mov v24.16b, v25.16b\n" - "mov v23.16b, v25.16b\n" - "fmla v18.4s, v30.4s, v3.4s\n" - "fmla v0.4s, v30.4s, v6.4s\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "fmla v12.4s, v30.4s, v8.4s\n" - "fmla v15.4s, v30.4s, 
v7.4s\n" - "fmla v1.4s, v30.4s, v5.4s\n" - "fmla v11.4s, v30.4s, v9.4s\n" - "fmla v21.4s, v30.4s, v19.4s\n" - "fmla v24.4s, v30.4s, v22.4s\n" - "ldr q25, [x25, x27]\n" - "fmla v17.4s, v31.4s, v6.4s\n" - "ldr x25, [%[inptrs], 0]\n" - "fmla v15.4s, v31.4s, v8.4s\n" - "fmla v1.4s, v31.4s, v7.4s\n" - "fmla v21.4s, v31.4s, v9.4s\n" - "ldr q26, [x16, x27]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "ldr x16, [%[inptrs], 256]\n" - "fmla v10.4s, v26.4s, v5.4s\n" - "ldr q31, [x15, x27]\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "ldr q29, [x7, x27]\n" - "fmla v13.4s, v31.4s, v3.4s\n" - "ldr x15, [%[inptrs], 216]\n" - "fmla v14.4s, v31.4s, v6.4s\n" - "ldr x7, [%[inptrs], 176]\n" - "fmla v12.4s, v31.4s, v4.4s\n" - "fmla v10.4s, v31.4s, v7.4s\n" - "fmla v11.4s, v31.4s, v5.4s\n" - "fmla v20.4s, v31.4s, v19.4s\n" - "fmla v0.4s, v29.4s, v3.4s\n" - "ldr q28, [x24, x27]\n" - "fmla v15.4s, v29.4s, v4.4s\n" - "ldr x24, [%[inptrs], 136]\n" - "fmla v12.4s, v29.4s, v6.4s\n" - "fmla v10.4s, v29.4s, v8.4s\n" - "fmla v11.4s, v29.4s, v7.4s\n" - "fmla v21.4s, v29.4s, v5.4s\n" - "fmla v20.4s, v29.4s, v9.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v23.4s, v29.4s, v22.4s\n" - "ldr q25, [x17, x27]\n" - "fmla v17.4s, v28.4s, v3.4s\n" - "ldr q29, [x16, x27]\n" - "fmla v15.4s, v28.4s, v6.4s\n" - "ldr x16, [%[inptrs], 264]\n" - "fmla v1.4s, v28.4s, v4.4s\n" - "ldr x17, [%[inptrs], 48]\n" - "fmla v11.4s, v28.4s, v8.4s\n" - "fmla v21.4s, v28.4s, v7.4s\n" - "fmla v24.4s, v28.4s, v9.4s\n" - "ldr q22, [x15, x27]\n" - "fmla v14.4s, v29.4s, v3.4s\n" - "ldr x15, [%[inptrs], 224]\n" - "fmla v1.4s, v25.4s, v6.4s\n" - "fmla v10.4s, v29.4s, v4.4s\n" - "fmla v21.4s, v25.4s, v8.4s\n" - "ldr q27, [x7, x27]\n" - "fmla v20.4s, v29.4s, v5.4s\n" - "ldr q26, [x24, x27]\n" - "fmla v12.4s, v22.4s, v3.4s\n" - "ldr x7, [%[inptrs], 184]\n" - "fmla v10.4s, v22.4s, v6.4s\n" - "ldr x24, [%[inptrs], 96]\n" - "fmla v11.4s, v22.4s, v4.4s\n" - "fmla v24.4s, v22.4s, v5.4s\n" - "fmla v20.4s, v22.4s, v7.4s\n" - "fmla v23.4s, v22.4s, v19.4s\n" 
- "fmla v15.4s, v27.4s, v3.4s\n" - "ldr q25, [x16, x27]\n" - "fmla v21.4s, v27.4s, v4.4s\n" - "ldr q31, [x15, x27]\n" - "fmla v11.4s, v27.4s, v6.4s\n" - "ldr x16, [%[inptrs], 272]\n" - "fmla v20.4s, v27.4s, v8.4s\n" - "ldr x15, [%[inptrs], 232]\n" - "fmla v24.4s, v27.4s, v7.4s\n" - "fmla v23.4s, v27.4s, v9.4s\n" - "fmla v1.4s, v26.4s, v3.4s\n" - "ldr q22, [x7, x27]\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr q19, [x16, x27]\n" - "fmla v10.4s, v25.4s, v3.4s\n" - "ldr x16, [%[inptrs], 280]\n" - "fmla v24.4s, v26.4s, v8.4s\n" - "ldr q28, [x15, x27]\n" - "fmla v20.4s, v25.4s, v4.4s\n" - "ldr x7, [%[inptrs], 144]\n" - "fmla v23.4s, v25.4s, v5.4s\n" - "ldr q30, [x16, x27]\n" - "fmla v11.4s, v31.4s, v3.4s\n" - "add x27, x27, #16\n" - "fmla v24.4s, v31.4s, v4.4s\n" - "ldr q27, [x25, x27]\n" - "fmla v20.4s, v31.4s, v6.4s\n" - "ldr x25, [%[inptrs], 8]\n" - "fmla v23.4s, v31.4s, v7.4s\n" - "movi v29.16b, #0\n" - "fmla v21.4s, v22.4s, v3.4s\n" - "ldr q26, [x17, x27]\n" - "fmla v24.4s, v22.4s, v6.4s\n" - "ldr x17, [%[inptrs], 56]\n" - "fmla v20.4s, v19.4s, v3.4s\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmla v23.4s, v22.4s, v8.4s\n" - "ldr q25, [%[wbptr]]\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "ldr q22, [%[wbptr], #16]\n" - "str q2, [x20, x28]\n" - "fmla v24.4s, v28.4s, v3.4s\n" - "fmax v17.4s, v17.4s, v29.4s\n" - "ldr q9, [%[wbptr], #32]\n" - "fmla v23.4s, v19.4s, v4.4s\n" - "ldr q8, [%[wbptr], #48]\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "ldr q19, [%[wbptr], #64]\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "ldr x20, [%[outptrs], 8]\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "str q18, [x20, x28]\n" - "fmla v23.4s, v28.4s, v6.4s\n" - "str q16, [x21, x28]\n" - "fmax v21.4s, v21.4s, v29.4s\n" - "fmax v13.4s, v13.4s, v29.4s\n" - "ldr q7, [%[wbptr], #80]\n" - "fmax v12.4s, v12.4s, v29.4s\n" - "ldr q5, [%[wbptr], #112]\n" - "fmla v23.4s, v30.4s, v3.4s\n" - "ldr q6, [%[wbptr], #96]\n" - "str q13, [x22, x28]\n" - "fmax v11.4s, v11.4s, v29.4s\n" - "fmax v24.4s, v24.4s, 
v29.4s\n" - "ldr q4, [%[wbptr], #128]\n" - "fmax v14.4s, v14.4s, v29.4s\n" - "ldr q31, [x25, x27]\n" - "fmax v10.4s, v10.4s, v29.4s\n" - "ldr q3, [%[wbptr], #144]\n" - "fmax v20.4s, v20.4s, v29.4s\n" - "ldr q28, [x24, x27]\n" - "str q14, [x23, x28]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "mov v2.16b, v25.16b\n" - "ldr q29, [x17, x27]\n" - "ldr x20, [%[outptrs], 16]\n" - "ldr x21, [%[outptrs], 40]\n" - "ldr x22, [%[outptrs], 72]\n" - "ldr x23, [%[outptrs], 104]\n" - "ldr x25, [%[inptrs], 16]\n" - "ldr x24, [%[inptrs], 104]\n" - "str q17, [x20, x28]\n" - "mov v16.16b, v25.16b\n" - "str q0, [x21, x28]\n" - "mov v18.16b, v25.16b\n" - "str q12, [x22, x28]\n" - "mov v13.16b, v25.16b\n" - "str q10, [x23, x28]\n" - "mov v0.16b, v25.16b\n" - "fmla v2.4s, v27.4s, v22.4s\n" - "ldr q30, [x25, x27]\n" - "fmla v16.4s, v26.4s, v22.4s\n" - "ldr x20, [%[outptrs], 24]\n" - "mov v17.16b, v25.16b\n" - "ldr x21, [%[outptrs], 48]\n" - "str q1, [x20, x28]\n" - "mov v14.16b, v25.16b\n" - "str q15, [x21, x28]\n" - "mov v12.16b, v25.16b\n" - "mov v15.16b, v25.16b\n" - "ldr x21, [%[outptrs], 56]\n" - "fmla v2.4s, v26.4s, v19.4s\n" - "ldr q27, [x7, x27]\n" - "str q21, [x21, x28]\n" - "ldr x22, [%[outptrs], 80]\n" - "ldr q21, [x24, x27]\n" - "ldr x23, [%[outptrs], 112]\n" - "str q11, [x22, x28]\n" - "fmla v2.4s, v31.4s, v9.4s\n" - "str q20, [x23, x28]\n" - "ldr x22, [%[outptrs], 88]\n" - "ldr x23, [%[outptrs], 120]\n" - "str q24, [x22, x28]\n" - "str q23, [x23, x28]\n" - "add x28, x28, #16\n" - "bne 2b\n" - "3:\n" - "mov v1.16b, v25.16b\n" - "ldr x17, [%[inptrs], 64]\n" - "mov v10.16b, v25.16b\n" - "ldr x25, [%[inptrs], 24]\n" - "mov v11.16b, v25.16b\n" - "ldr x15, [%[inptrs], 192]\n" - "fmla v18.4s, v31.4s, v22.4s\n" - "ldr q23, [x17, x27]\n" - "fmla v2.4s, v28.4s, v5.4s\n" - "ldr x7, [%[inptrs], 152]\n" - "fmla v16.4s, v28.4s, v19.4s\n" - "ldr x24, [%[inptrs], 112]\n" - "fmla v13.4s, v28.4s, v22.4s\n" - "ldr q26, [x25, x27]\n" - "fmla v18.4s, v29.4s, v19.4s\n" - "ldr x17, [%[inptrs], 72]\n" - 
"fmla v2.4s, v29.4s, v7.4s\n" - "ldr x25, [%[inptrs], 32]\n" - "fmla v16.4s, v29.4s, v9.4s\n" - "ldr x16, [%[inptrs], 240]\n" - "fmla v0.4s, v29.4s, v22.4s\n" - "ldr q28, [x15, x27]\n" - "fmla v18.4s, v30.4s, v9.4s\n" - "ldr x15, [%[inptrs], 200]\n" - "fmla v2.4s, v30.4s, v8.4s\n" - "ldr x20, [%[outptrs], 0]\n" - "fmla v17.4s, v30.4s, v22.4s\n" - "ldr q29, [x7, x27]\n" - "fmla v16.4s, v27.4s, v5.4s\n" - "ldr x7, [%[inptrs], 160]\n" - "fmla v13.4s, v27.4s, v19.4s\n" - "ldr x21, [%[outptrs], 32]\n" - "fmla v14.4s, v27.4s, v22.4s\n" - "ldr q20, [x24, x27]\n" - "fmla v2.4s, v21.4s, v4.4s\n" - "ldr x24, [%[inptrs], 120]\n" - "fmla v16.4s, v21.4s, v7.4s\n" - "ldr x22, [%[outptrs], 64]\n" - "fmla v18.4s, v21.4s, v5.4s\n" - "ldr x23, [%[outptrs], 96]\n" - "fmla v13.4s, v21.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v0.4s, v21.4s, v19.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v12.4s, v21.4s, v22.4s\n" - "ldr q24, [x17, x27]\n" - "fmla v2.4s, v23.4s, v6.4s\n" - "ldr x17, [%[inptrs], 80]\n" - "fmla v16.4s, v23.4s, v8.4s\n" - "fmla v18.4s, v23.4s, v7.4s\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "fmla v17.4s, v23.4s, v19.4s\n" - "fmla v15.4s, v23.4s, v22.4s\n" - "ldr q23, [x25, x27]\n" - "fmla v1.4s, v26.4s, v22.4s\n" - "ldr x25, [%[inptrs], 40]\n" - "fmla v18.4s, v26.4s, v8.4s\n" - "fmla v13.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v26.4s, v9.4s\n" - "ldr q30, [x16, x27]\n" - "fmla v14.4s, v28.4s, v19.4s\n" - "ldr q26, [x15, x27]\n" - "fmla v16.4s, v29.4s, v4.4s\n" - "ldr x16, [%[inptrs], 248]\n" - "fmla v13.4s, v29.4s, v7.4s\n" - "ldr x15, [%[inptrs], 208]\n" - "fmla v0.4s, v29.4s, v5.4s\n" - "fmla v12.4s, v29.4s, v19.4s\n" - "fmla v14.4s, v29.4s, v9.4s\n" - "fmla v10.4s, v29.4s, v22.4s\n" - "mov v21.16b, v25.16b\n" - "fmla v2.4s, v20.4s, v3.4s\n" - "fmla v16.4s, v20.4s, v6.4s\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "fmla v13.4s, v20.4s, v8.4s\n" - "fmla v0.4s, v20.4s, v7.4s\n" - "fmla v17.4s, v20.4s, v5.4s\n" - "fmla v12.4s, v20.4s, v9.4s\n" - "fmla v15.4s, 
v20.4s, v19.4s\n" - "fmla v11.4s, v20.4s, v22.4s\n" - "mov v20.16b, v25.16b\n" - "fmla v18.4s, v24.4s, v6.4s\n" - "fmla v0.4s, v24.4s, v8.4s\n" - "fmla v1.4s, v24.4s, v19.4s\n" - "fmla v17.4s, v24.4s, v7.4s\n" - "fmla v21.4s, v24.4s, v22.4s\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "ldr q27, [x7, x27]\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "ldr q30, [x24, x27]\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "ldr x7, [%[inptrs], 168]\n" - "fmla v17.4s, v23.4s, v8.4s\n" - "ldr q31, [x17, x27]\n" - "fmla v13.4s, v26.4s, v4.4s\n" - "ldr x24, [%[inptrs], 128]\n" - "fmla v14.4s, v26.4s, v7.4s\n" - "ldr x17, [%[inptrs], 88]\n" - "fmla v12.4s, v26.4s, v5.4s\n" - "fmla v10.4s, v26.4s, v19.4s\n" - "mov v24.16b, v25.16b\n" - "mov v23.16b, v25.16b\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v0.4s, v27.4s, v4.4s\n" - "fmla v14.4s, v27.4s, v8.4s\n" - "fmla v12.4s, v27.4s, v7.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "fmla v10.4s, v27.4s, v9.4s\n" - "fmla v11.4s, v27.4s, v19.4s\n" - "fmla v20.4s, v27.4s, v22.4s\n" - "ldr q25, [x25, x27]\n" - "fmla v18.4s, v30.4s, v3.4s\n" - "fmla v0.4s, v30.4s, v6.4s\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "fmla v12.4s, v30.4s, v8.4s\n" - "fmla v15.4s, v30.4s, v7.4s\n" - "fmla v1.4s, v30.4s, v5.4s\n" - "fmla v11.4s, v30.4s, v9.4s\n" - "fmla v21.4s, v30.4s, v19.4s\n" - "fmla v24.4s, v30.4s, v22.4s\n" - "ldr q26, [x16, x27]\n" - "fmla v17.4s, v31.4s, v6.4s\n" - "ldr x16, [%[inptrs], 256]\n" - "fmla v15.4s, v31.4s, v8.4s\n" - "fmla v1.4s, v31.4s, v7.4s\n" - "fmla v21.4s, v31.4s, v9.4s\n" - "ldr q31, [x15, x27]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "ldr x15, [%[inptrs], 216]\n" - "fmla v10.4s, v26.4s, v5.4s\n" - "ldr q29, [x7, x27]\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "ldr q28, [x24, x27]\n" - "fmla v13.4s, v31.4s, v3.4s\n" - "ldr x7, [%[inptrs], 176]\n" - "fmla v14.4s, v31.4s, v6.4s\n" - "ldr x24, [%[inptrs], 136]\n" - "fmla v12.4s, v31.4s, v4.4s\n" - "fmla v10.4s, v31.4s, v7.4s\n" - "fmla v11.4s, v31.4s, v5.4s\n" - "fmla v20.4s, 
v31.4s, v19.4s\n" - "fmla v0.4s, v29.4s, v3.4s\n" - "ldr q25, [x17, x27]\n" - "fmla v15.4s, v29.4s, v4.4s\n" - "fmla v21.4s, v29.4s, v5.4s\n" - "fmla v12.4s, v29.4s, v6.4s\n" - "fmla v10.4s, v29.4s, v8.4s\n" - "fmla v11.4s, v29.4s, v7.4s\n" - "fmla v20.4s, v29.4s, v9.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v23.4s, v29.4s, v22.4s\n" - "fmla v17.4s, v28.4s, v3.4s\n" - "ldr q29, [x16, x27]\n" - "fmla v15.4s, v28.4s, v6.4s\n" - "ldr q22, [x15, x27]\n" - "fmla v1.4s, v28.4s, v4.4s\n" - "ldr x16, [%[inptrs], 264]\n" - "fmla v11.4s, v28.4s, v8.4s\n" - "ldr x15, [%[inptrs], 224]\n" - "fmla v21.4s, v28.4s, v7.4s\n" - "fmla v24.4s, v28.4s, v9.4s\n" - "fmla v14.4s, v29.4s, v3.4s\n" - "ldr q27, [x7, x27]\n" - "fmla v1.4s, v25.4s, v6.4s\n" - "ldr x7, [%[inptrs], 184]\n" - "fmla v10.4s, v29.4s, v4.4s\n" - "fmla v20.4s, v29.4s, v5.4s\n" - "fmla v21.4s, v25.4s, v8.4s\n" - "ldr q26, [x24, x27]\n" - "fmla v12.4s, v22.4s, v3.4s\n" - "ldr q25, [x16, x27]\n" - "fmla v11.4s, v22.4s, v4.4s\n" - "ldr x16, [%[inptrs], 272]\n" - "fmla v10.4s, v22.4s, v6.4s\n" - "fmla v20.4s, v22.4s, v7.4s\n" - "fmla v24.4s, v22.4s, v5.4s\n" - "fmla v23.4s, v22.4s, v19.4s\n" - "fmla v15.4s, v27.4s, v3.4s\n" - "ldr q31, [x15, x27]\n" - "fmla v11.4s, v27.4s, v6.4s\n" - "ldr q22, [x7, x27]\n" - "fmla v21.4s, v27.4s, v4.4s\n" - "ldr x15, [%[inptrs], 232]\n" - "fmla v20.4s, v27.4s, v8.4s\n" - "fmla v24.4s, v27.4s, v7.4s\n" - "fmla v23.4s, v27.4s, v9.4s\n" - "ldr q19, [x16, x27]\n" - "fmla v1.4s, v26.4s, v3.4s\n" - "ldr q28, [x15, x27]\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr x16, [%[inptrs], 280]\n" - "fmla v24.4s, v26.4s, v8.4s\n" - "fmla v10.4s, v25.4s, v3.4s\n" - "fmla v20.4s, v25.4s, v4.4s\n" - "ldr q30, [x16, x27]\n" - "fmla v23.4s, v25.4s, v5.4s\n" - "add x27, x27, #16\n" - "fmla v11.4s, v31.4s, v3.4s\n" - "fmla v21.4s, v22.4s, v3.4s\n" - "fmla v24.4s, v31.4s, v4.4s\n" - "movi v29.16b, #0\n" - "fmla v20.4s, v31.4s, v6.4s\n" - "fmla v23.4s, v31.4s, v7.4s\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmax 
v18.4s, v18.4s, v29.4s\n" - "fmla v24.4s, v22.4s, v6.4s\n" - "fmax v17.4s, v17.4s, v29.4s\n" - "fmla v20.4s, v19.4s, v3.4s\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "str q2, [x20, x28]\n" - "fmla v23.4s, v22.4s, v8.4s\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "ldr x20, [%[outptrs], 8]\n" - "fmla v24.4s, v28.4s, v3.4s\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "str q18, [x20, x28]\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "str q16, [x21, x28]\n" - "fmla v23.4s, v19.4s, v4.4s\n" - "fmax v21.4s, v21.4s, v29.4s\n" - "ldr x20, [%[outptrs], 16]\n" - "fmax v13.4s, v13.4s, v29.4s\n" - "ldr x21, [%[outptrs], 40]\n" - "str q17, [x20, x28]\n" - "fmax v12.4s, v12.4s, v29.4s\n" - "str q0, [x21, x28]\n" - "fmla v23.4s, v28.4s, v6.4s\n" - "str q13, [x22, x28]\n" - "fmax v11.4s, v11.4s, v29.4s\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "ldr x20, [%[outptrs], 24]\n" - "fmax v14.4s, v14.4s, v29.4s\n" - "ldr x21, [%[outptrs], 48]\n" - "str q1, [x20, x28]\n" - "fmla v23.4s, v30.4s, v3.4s\n" - "str q15, [x21, x28]\n" - "fmax v10.4s, v10.4s, v29.4s\n" - "str q14, [x23, x28]\n" - "fmax v20.4s, v20.4s, v29.4s\n" - "ldr x21, [%[outptrs], 56]\n" - "ldr x22, [%[outptrs], 72]\n" - "ldr x23, [%[outptrs], 104]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "str q21, [x21, x28]\n" - "str q12, [x22, x28]\n" - "str q10, [x23, x28]\n" - "ldr x22, [%[outptrs], 80]\n" - "ldr x23, [%[outptrs], 112]\n" - "str q11, [x22, x28]\n" - "str q20, [x23, x28]\n" - "ldr x22, [%[outptrs], 88]\n" - "ldr x23, [%[outptrs], 120]\n" - "str q24, [x22, x28]\n" - "str q23, [x23, x28]\n" - "add x28, x28, #16\n" - "4:\n" - "cbz x19, 7f\n" - "ldr s25, [%[wbptr]]\n" - "mov v2.16b, v25.16b\n" - "ldr s22, [%[wbptr], #4]\n" - "mov v16.16b, v25.16b\n" - "ldr s9, [%[wbptr], #8]\n" - "mov v18.16b, v25.16b\n" - "ldr s8, [%[wbptr], #12]\n" - "mov v13.16b, v25.16b\n" - "ldr s19, [%[wbptr], #16]\n" - "mov v0.16b, v25.16b\n" - "ldr s7, [%[wbptr], #20]\n" - "mov v17.16b, v25.16b\n" - "ldr s6, [%[wbptr], #24]\n" - "mov v14.16b, v25.16b\n" - "ldr s5, [%[wbptr], 
#28]\n" - "mov v12.16b, v25.16b\n" - "ldr s4, [%[wbptr], #32]\n" - "mov v15.16b, v25.16b\n" - "ldr s3, [%[wbptr], #36]\n" - "ldr x25, [%[inptrs], 0]\n" - "ldr x17, [%[inptrs], 48]\n" - "ldr x24, [%[inptrs], 96]\n" - "ldr x7, [%[inptrs], 144]\n" - "subs x19, x19, #1\n" - "ldr s27, [x25, x27]\n" - "fmla v2.4s, v27.4s, v22.4s\n" - "ldr s26, [x17, x27]\n" - "fmla v16.4s, v26.4s, v22.4s\n" - "ldr s28, [x24, x27]\n" - "ldr s27, [x7, x27]\n" - "ldr x25, [%[inptrs], 8]\n" - "ldr x17, [%[inptrs], 56]\n" - "ldr x24, [%[inptrs], 104]\n" - "ldr s31, [x25, x27]\n" - "fmla v2.4s, v26.4s, v19.4s\n" - "ldr s29, [x17, x27]\n" - "ldr s21, [x24, x27]\n" - "ldr x25, [%[inptrs], 16]\n" - "ldr s30, [x25, x27]\n" - "fmla v2.4s, v31.4s, v9.4s\n" - "beq 6f\n" - "5:\n" - "mov v1.16b, v25.16b\n" - "ldr x17, [%[inptrs], 64]\n" - "mov v10.16b, v25.16b\n" - "ldr x25, [%[inptrs], 24]\n" - "fmla v18.4s, v31.4s, v22.4s\n" - "ldr s23, [x17, x27]\n" - "fmla v2.4s, v28.4s, v5.4s\n" - "ldr x15, [%[inptrs], 192]\n" - "fmla v16.4s, v28.4s, v19.4s\n" - "ldr x7, [%[inptrs], 152]\n" - "fmla v13.4s, v28.4s, v22.4s\n" - "ldr s26, [x25, x27]\n" - "fmla v18.4s, v29.4s, v19.4s\n" - "ldr x24, [%[inptrs], 112]\n" - "fmla v2.4s, v29.4s, v7.4s\n" - "ldr x17, [%[inptrs], 72]\n" - "fmla v16.4s, v29.4s, v9.4s\n" - "ldr x25, [%[inptrs], 32]\n" - "fmla v0.4s, v29.4s, v22.4s\n" - "ldr s28, [x15, x27]\n" - "fmla v18.4s, v30.4s, v9.4s\n" - "ldr x16, [%[inptrs], 240]\n" - "fmla v2.4s, v30.4s, v8.4s\n" - "ldr x15, [%[inptrs], 200]\n" - "fmla v17.4s, v30.4s, v22.4s\n" - "ldr s29, [x7, x27]\n" - "fmla v16.4s, v27.4s, v5.4s\n" - "ldr x7, [%[inptrs], 160]\n" - "fmla v13.4s, v27.4s, v19.4s\n" - "ldr x20, [%[outptrs], 0]\n" - "fmla v14.4s, v27.4s, v22.4s\n" - "ldr s20, [x24, x27]\n" - "fmla v2.4s, v21.4s, v4.4s\n" - "ldr x24, [%[inptrs], 120]\n" - "fmla v16.4s, v21.4s, v7.4s\n" - "ldr x21, [%[outptrs], 32]\n" - "fmla v18.4s, v21.4s, v5.4s\n" - "ldr x22, [%[outptrs], 64]\n" - "fmla v13.4s, v21.4s, v9.4s\n" - "ldr x23, [%[outptrs], 
96]\n" - "fmla v0.4s, v21.4s, v19.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v12.4s, v21.4s, v22.4s\n" - "ldr s24, [x17, x27]\n" - "fmla v2.4s, v23.4s, v6.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v16.4s, v23.4s, v8.4s\n" - "ldr x17, [%[inptrs], 80]\n" - "fmla v18.4s, v23.4s, v7.4s\n" - "subs x19, x19, #1\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "fmla v17.4s, v23.4s, v19.4s\n" - "fmla v15.4s, v23.4s, v22.4s\n" - "ldr s23, [x25, x27]\n" - "fmla v1.4s, v26.4s, v22.4s\n" - "ldr x25, [%[inptrs], 40]\n" - "fmla v18.4s, v26.4s, v8.4s\n" - "fmla v13.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v26.4s, v9.4s\n" - "ldr s30, [x16, x27]\n" - "fmla v14.4s, v28.4s, v19.4s\n" - "ldr s26, [x15, x27]\n" - "fmla v16.4s, v29.4s, v4.4s\n" - "ldr x16, [%[inptrs], 248]\n" - "fmla v13.4s, v29.4s, v7.4s\n" - "ldr x15, [%[inptrs], 208]\n" - "fmla v0.4s, v29.4s, v5.4s\n" - "fmla v12.4s, v29.4s, v19.4s\n" - "fmla v14.4s, v29.4s, v9.4s\n" - "fmla v10.4s, v29.4s, v22.4s\n" - "mov v11.16b, v25.16b\n" - "fmla v2.4s, v20.4s, v3.4s\n" - "fmla v16.4s, v20.4s, v6.4s\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "fmla v13.4s, v20.4s, v8.4s\n" - "fmla v0.4s, v20.4s, v7.4s\n" - "fmla v17.4s, v20.4s, v5.4s\n" - "fmla v12.4s, v20.4s, v9.4s\n" - "fmla v15.4s, v20.4s, v19.4s\n" - "fmla v11.4s, v20.4s, v22.4s\n" - "mov v21.16b, v25.16b\n" - "fmla v18.4s, v24.4s, v6.4s\n" - "fmla v0.4s, v24.4s, v8.4s\n" - "fmla v1.4s, v24.4s, v19.4s\n" - "fmla v17.4s, v24.4s, v7.4s\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "mov v20.16b, v25.16b\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "fmla v21.4s, v24.4s, v22.4s\n" - "ldr s27, [x7, x27]\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "ldr x7, [%[inptrs], 168]\n" - "fmla v17.4s, v23.4s, v8.4s\n" - "ldr s30, [x24, x27]\n" - "fmla v13.4s, v26.4s, v4.4s\n" - "ldr x24, [%[inptrs], 128]\n" - "fmla v14.4s, v26.4s, v7.4s\n" - "fmla v12.4s, v26.4s, v5.4s\n" - "fmla v10.4s, v26.4s, v19.4s\n" - "ldr s31, [x17, x27]\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "ldr x17, [%[inptrs], 88]\n" - "fmla v13.4s, v27.4s, 
v6.4s\n" - "fmla v0.4s, v27.4s, v4.4s\n" - "fmla v14.4s, v27.4s, v8.4s\n" - "fmla v12.4s, v27.4s, v7.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "fmla v10.4s, v27.4s, v9.4s\n" - "fmla v11.4s, v27.4s, v19.4s\n" - "fmla v20.4s, v27.4s, v22.4s\n" - "mov v24.16b, v25.16b\n" - "mov v23.16b, v25.16b\n" - "fmla v18.4s, v30.4s, v3.4s\n" - "fmla v0.4s, v30.4s, v6.4s\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "fmla v12.4s, v30.4s, v8.4s\n" - "fmla v15.4s, v30.4s, v7.4s\n" - "fmla v1.4s, v30.4s, v5.4s\n" - "fmla v11.4s, v30.4s, v9.4s\n" - "fmla v21.4s, v30.4s, v19.4s\n" - "fmla v24.4s, v30.4s, v22.4s\n" - "ldr s25, [x25, x27]\n" - "fmla v17.4s, v31.4s, v6.4s\n" - "ldr x25, [%[inptrs], 0]\n" - "fmla v15.4s, v31.4s, v8.4s\n" - "fmla v1.4s, v31.4s, v7.4s\n" - "fmla v21.4s, v31.4s, v9.4s\n" - "ldr s26, [x16, x27]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "ldr x16, [%[inptrs], 256]\n" - "fmla v10.4s, v26.4s, v5.4s\n" - "ldr s31, [x15, x27]\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "ldr s29, [x7, x27]\n" - "fmla v13.4s, v31.4s, v3.4s\n" - "ldr x15, [%[inptrs], 216]\n" - "fmla v14.4s, v31.4s, v6.4s\n" - "ldr x7, [%[inptrs], 176]\n" - "fmla v12.4s, v31.4s, v4.4s\n" - "fmla v10.4s, v31.4s, v7.4s\n" - "fmla v11.4s, v31.4s, v5.4s\n" - "fmla v20.4s, v31.4s, v19.4s\n" - "fmla v0.4s, v29.4s, v3.4s\n" - "ldr s28, [x24, x27]\n" - "fmla v15.4s, v29.4s, v4.4s\n" - "ldr x24, [%[inptrs], 136]\n" - "fmla v12.4s, v29.4s, v6.4s\n" - "fmla v10.4s, v29.4s, v8.4s\n" - "fmla v11.4s, v29.4s, v7.4s\n" - "fmla v21.4s, v29.4s, v5.4s\n" - "fmla v20.4s, v29.4s, v9.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v23.4s, v29.4s, v22.4s\n" - "ldr s25, [x17, x27]\n" - "fmla v17.4s, v28.4s, v3.4s\n" - "ldr s29, [x16, x27]\n" - "fmla v15.4s, v28.4s, v6.4s\n" - "ldr x16, [%[inptrs], 264]\n" - "fmla v1.4s, v28.4s, v4.4s\n" - "ldr x17, [%[inptrs], 48]\n" - "fmla v11.4s, v28.4s, v8.4s\n" - "fmla v21.4s, v28.4s, v7.4s\n" - "fmla v24.4s, v28.4s, v9.4s\n" - "ldr s22, [x15, x27]\n" - "fmla v14.4s, v29.4s, v3.4s\n" - "ldr x15, [%[inptrs], 
224]\n" - "fmla v1.4s, v25.4s, v6.4s\n" - "fmla v10.4s, v29.4s, v4.4s\n" - "fmla v21.4s, v25.4s, v8.4s\n" - "ldr s27, [x7, x27]\n" - "fmla v20.4s, v29.4s, v5.4s\n" - "ldr s26, [x24, x27]\n" - "fmla v12.4s, v22.4s, v3.4s\n" - "ldr x7, [%[inptrs], 184]\n" - "fmla v10.4s, v22.4s, v6.4s\n" - "ldr x24, [%[inptrs], 96]\n" - "fmla v11.4s, v22.4s, v4.4s\n" - "fmla v24.4s, v22.4s, v5.4s\n" - "fmla v20.4s, v22.4s, v7.4s\n" - "fmla v23.4s, v22.4s, v19.4s\n" - "fmla v15.4s, v27.4s, v3.4s\n" - "ldr s25, [x16, x27]\n" - "fmla v21.4s, v27.4s, v4.4s\n" - "ldr s31, [x15, x27]\n" - "fmla v11.4s, v27.4s, v6.4s\n" - "ldr x16, [%[inptrs], 272]\n" - "fmla v20.4s, v27.4s, v8.4s\n" - "ldr x15, [%[inptrs], 232]\n" - "fmla v24.4s, v27.4s, v7.4s\n" - "fmla v23.4s, v27.4s, v9.4s\n" - "fmla v1.4s, v26.4s, v3.4s\n" - "ldr s22, [x7, x27]\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr s19, [x16, x27]\n" - "fmla v10.4s, v25.4s, v3.4s\n" - "ldr x16, [%[inptrs], 280]\n" - "fmla v24.4s, v26.4s, v8.4s\n" - "ldr s28, [x15, x27]\n" - "fmla v20.4s, v25.4s, v4.4s\n" - "ldr x7, [%[inptrs], 144]\n" - "fmla v23.4s, v25.4s, v5.4s\n" - "ldr s30, [x16, x27]\n" - "fmla v11.4s, v31.4s, v3.4s\n" - "add x27, x27, #4\n" - "fmla v24.4s, v31.4s, v4.4s\n" - "ldr s27, [x25, x27]\n" - "fmla v20.4s, v31.4s, v6.4s\n" - "ldr x25, [%[inptrs], 8]\n" - "fmla v23.4s, v31.4s, v7.4s\n" - "movi v29.16b, #0\n" - "fmla v21.4s, v22.4s, v3.4s\n" - "ldr s26, [x17, x27]\n" - "fmla v24.4s, v22.4s, v6.4s\n" - "ldr x17, [%[inptrs], 56]\n" - "fmla v20.4s, v19.4s, v3.4s\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmla v23.4s, v22.4s, v8.4s\n" - "ldr s25, [%[wbptr]]\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "ldr s22, [%[wbptr], #4]\n" - "str s2, [x20, x28]\n" - "fmla v24.4s, v28.4s, v3.4s\n" - "fmax v17.4s, v17.4s, v29.4s\n" - "ldr s9, [%[wbptr], #8]\n" - "fmla v23.4s, v19.4s, v4.4s\n" - "ldr s8, [%[wbptr], #12]\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "ldr s19, [%[wbptr], #16]\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "ldr x20, [%[outptrs], 8]\n" - "fmax v0.4s, 
v0.4s, v29.4s\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "str s18, [x20, x28]\n" - "fmla v23.4s, v28.4s, v6.4s\n" - "str s16, [x21, x28]\n" - "fmax v21.4s, v21.4s, v29.4s\n" - "fmax v13.4s, v13.4s, v29.4s\n" - "ldr s7, [%[wbptr], #20]\n" - "fmax v12.4s, v12.4s, v29.4s\n" - "ldr s5, [%[wbptr], #28]\n" - "fmla v23.4s, v30.4s, v3.4s\n" - "ldr s6, [%[wbptr], #24]\n" - "str s13, [x22, x28]\n" - "fmax v11.4s, v11.4s, v29.4s\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "ldr s4, [%[wbptr], #32]\n" - "fmax v14.4s, v14.4s, v29.4s\n" - "ldr s31, [x25, x27]\n" - "fmax v10.4s, v10.4s, v29.4s\n" - "ldr s3, [%[wbptr], #36]\n" - "fmax v20.4s, v20.4s, v29.4s\n" - "ldr s28, [x24, x27]\n" - "str s14, [x23, x28]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "mov v2.16b, v25.16b\n" - "ldr s29, [x17, x27]\n" - "ldr x20, [%[outptrs], 16]\n" - "ldr x21, [%[outptrs], 40]\n" - "ldr x22, [%[outptrs], 72]\n" - "ldr x23, [%[outptrs], 104]\n" - "ldr x25, [%[inptrs], 16]\n" - "ldr x24, [%[inptrs], 104]\n" - "str s17, [x20, x28]\n" - "mov v16.16b, v25.16b\n" - "str s0, [x21, x28]\n" - "mov v18.16b, v25.16b\n" - "str s12, [x22, x28]\n" - "mov v13.16b, v25.16b\n" - "str s10, [x23, x28]\n" - "mov v0.16b, v25.16b\n" - "fmla v2.4s, v27.4s, v22.4s\n" - "ldr s30, [x25, x27]\n" - "fmla v16.4s, v26.4s, v22.4s\n" - "ldr x20, [%[outptrs], 24]\n" - "mov v17.16b, v25.16b\n" - "ldr x21, [%[outptrs], 48]\n" - "str s1, [x20, x28]\n" - "mov v14.16b, v25.16b\n" - "str s15, [x21, x28]\n" - "mov v12.16b, v25.16b\n" - "mov v15.16b, v25.16b\n" - "ldr x21, [%[outptrs], 56]\n" - "fmla v2.4s, v26.4s, v19.4s\n" - "ldr s27, [x7, x27]\n" - "str s21, [x21, x28]\n" - "ldr x22, [%[outptrs], 80]\n" - "ldr s21, [x24, x27]\n" - "ldr x23, [%[outptrs], 112]\n" - "str s11, [x22, x28]\n" - "fmla v2.4s, v31.4s, v9.4s\n" - "str s20, [x23, x28]\n" - "ldr x22, [%[outptrs], 88]\n" - "ldr x23, [%[outptrs], 120]\n" - "str s24, [x22, x28]\n" - "str s23, [x23, x28]\n" - "add x28, x28, #4\n" - "bne 5b\n" - "6:\n" - "mov v1.16b, v25.16b\n" - "ldr x17, 
[%[inptrs], 64]\n" - "mov v10.16b, v25.16b\n" - "ldr x25, [%[inptrs], 24]\n" - "mov v11.16b, v25.16b\n" - "ldr x15, [%[inptrs], 192]\n" - "fmla v18.4s, v31.4s, v22.4s\n" - "ldr s23, [x17, x27]\n" - "fmla v2.4s, v28.4s, v5.4s\n" - "ldr x7, [%[inptrs], 152]\n" - "fmla v16.4s, v28.4s, v19.4s\n" - "ldr x24, [%[inptrs], 112]\n" - "fmla v13.4s, v28.4s, v22.4s\n" - "ldr s26, [x25, x27]\n" - "fmla v18.4s, v29.4s, v19.4s\n" - "ldr x17, [%[inptrs], 72]\n" - "fmla v2.4s, v29.4s, v7.4s\n" - "ldr x25, [%[inptrs], 32]\n" - "fmla v16.4s, v29.4s, v9.4s\n" - "ldr x16, [%[inptrs], 240]\n" - "fmla v0.4s, v29.4s, v22.4s\n" - "ldr s28, [x15, x27]\n" - "fmla v18.4s, v30.4s, v9.4s\n" - "ldr x15, [%[inptrs], 200]\n" - "fmla v2.4s, v30.4s, v8.4s\n" - "ldr x20, [%[outptrs], 0]\n" - "fmla v17.4s, v30.4s, v22.4s\n" - "ldr s29, [x7, x27]\n" - "fmla v16.4s, v27.4s, v5.4s\n" - "ldr x7, [%[inptrs], 160]\n" - "fmla v13.4s, v27.4s, v19.4s\n" - "ldr x21, [%[outptrs], 32]\n" - "fmla v14.4s, v27.4s, v22.4s\n" - "ldr s20, [x24, x27]\n" - "fmla v2.4s, v21.4s, v4.4s\n" - "ldr x24, [%[inptrs], 120]\n" - "fmla v16.4s, v21.4s, v7.4s\n" - "ldr x22, [%[outptrs], 64]\n" - "fmla v18.4s, v21.4s, v5.4s\n" - "ldr x23, [%[outptrs], 96]\n" - "fmla v13.4s, v21.4s, v9.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v0.4s, v21.4s, v19.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v12.4s, v21.4s, v22.4s\n" - "ldr s24, [x17, x27]\n" - "fmla v2.4s, v23.4s, v6.4s\n" - "ldr x17, [%[inptrs], 80]\n" - "fmla v16.4s, v23.4s, v8.4s\n" - "fmla v18.4s, v23.4s, v7.4s\n" - "fmla v0.4s, v23.4s, v9.4s\n" - "fmla v17.4s, v23.4s, v19.4s\n" - "fmla v15.4s, v23.4s, v22.4s\n" - "ldr s23, [x25, x27]\n" - "fmla v1.4s, v26.4s, v22.4s\n" - "ldr x25, [%[inptrs], 40]\n" - "fmla v18.4s, v26.4s, v8.4s\n" - "fmla v13.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v26.4s, v9.4s\n" - "ldr s30, [x16, x27]\n" - "fmla v14.4s, v28.4s, v19.4s\n" - "ldr s26, [x15, x27]\n" - "fmla v16.4s, v29.4s, v4.4s\n" - "ldr x16, [%[inptrs], 248]\n" - "fmla v13.4s, v29.4s, 
v7.4s\n" - "ldr x15, [%[inptrs], 208]\n" - "fmla v0.4s, v29.4s, v5.4s\n" - "fmla v12.4s, v29.4s, v19.4s\n" - "fmla v14.4s, v29.4s, v9.4s\n" - "fmla v10.4s, v29.4s, v22.4s\n" - "mov v21.16b, v25.16b\n" - "fmla v2.4s, v20.4s, v3.4s\n" - "fmla v16.4s, v20.4s, v6.4s\n" - "fmla v18.4s, v20.4s, v4.4s\n" - "fmla v13.4s, v20.4s, v8.4s\n" - "fmla v0.4s, v20.4s, v7.4s\n" - "fmla v17.4s, v20.4s, v5.4s\n" - "fmla v12.4s, v20.4s, v9.4s\n" - "fmla v15.4s, v20.4s, v19.4s\n" - "fmla v11.4s, v20.4s, v22.4s\n" - "mov v20.16b, v25.16b\n" - "fmla v18.4s, v24.4s, v6.4s\n" - "fmla v0.4s, v24.4s, v8.4s\n" - "fmla v1.4s, v24.4s, v19.4s\n" - "fmla v17.4s, v24.4s, v7.4s\n" - "fmla v21.4s, v24.4s, v22.4s\n" - "fmla v15.4s, v24.4s, v9.4s\n" - "ldr s27, [x7, x27]\n" - "fmla v14.4s, v30.4s, v5.4s\n" - "ldr s30, [x24, x27]\n" - "fmla v1.4s, v23.4s, v9.4s\n" - "ldr x7, [%[inptrs], 168]\n" - "fmla v17.4s, v23.4s, v8.4s\n" - "ldr s31, [x17, x27]\n" - "fmla v13.4s, v26.4s, v4.4s\n" - "ldr x24, [%[inptrs], 128]\n" - "fmla v14.4s, v26.4s, v7.4s\n" - "ldr x17, [%[inptrs], 88]\n" - "fmla v12.4s, v26.4s, v5.4s\n" - "fmla v10.4s, v26.4s, v19.4s\n" - "mov v24.16b, v25.16b\n" - "mov v23.16b, v25.16b\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v0.4s, v27.4s, v4.4s\n" - "fmla v14.4s, v27.4s, v8.4s\n" - "fmla v12.4s, v27.4s, v7.4s\n" - "fmla v15.4s, v27.4s, v5.4s\n" - "fmla v10.4s, v27.4s, v9.4s\n" - "fmla v11.4s, v27.4s, v19.4s\n" - "fmla v20.4s, v27.4s, v22.4s\n" - "ldr s25, [x25, x27]\n" - "fmla v18.4s, v30.4s, v3.4s\n" - "fmla v0.4s, v30.4s, v6.4s\n" - "fmla v17.4s, v30.4s, v4.4s\n" - "fmla v12.4s, v30.4s, v8.4s\n" - "fmla v15.4s, v30.4s, v7.4s\n" - "fmla v1.4s, v30.4s, v5.4s\n" - "fmla v11.4s, v30.4s, v9.4s\n" - "fmla v21.4s, v30.4s, v19.4s\n" - "fmla v24.4s, v30.4s, v22.4s\n" - "ldr s26, [x16, x27]\n" - "fmla v17.4s, v31.4s, v6.4s\n" - "ldr x16, [%[inptrs], 256]\n" - "fmla v15.4s, v31.4s, v8.4s\n" - "fmla v1.4s, v31.4s, v7.4s\n" - "fmla v21.4s, v31.4s, v9.4s\n" - "ldr 
s31, [x15, x27]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "ldr x15, [%[inptrs], 216]\n" - "fmla v10.4s, v26.4s, v5.4s\n" - "ldr s29, [x7, x27]\n" - "fmla v1.4s, v25.4s, v8.4s\n" - "ldr s28, [x24, x27]\n" - "fmla v13.4s, v31.4s, v3.4s\n" - "ldr x7, [%[inptrs], 176]\n" - "fmla v14.4s, v31.4s, v6.4s\n" - "ldr x24, [%[inptrs], 136]\n" - "fmla v12.4s, v31.4s, v4.4s\n" - "fmla v10.4s, v31.4s, v7.4s\n" - "fmla v11.4s, v31.4s, v5.4s\n" - "fmla v20.4s, v31.4s, v19.4s\n" - "fmla v0.4s, v29.4s, v3.4s\n" - "ldr s25, [x17, x27]\n" - "fmla v15.4s, v29.4s, v4.4s\n" - "fmla v21.4s, v29.4s, v5.4s\n" - "fmla v12.4s, v29.4s, v6.4s\n" - "fmla v10.4s, v29.4s, v8.4s\n" - "fmla v11.4s, v29.4s, v7.4s\n" - "fmla v20.4s, v29.4s, v9.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v23.4s, v29.4s, v22.4s\n" - "fmla v17.4s, v28.4s, v3.4s\n" - "ldr s29, [x16, x27]\n" - "fmla v15.4s, v28.4s, v6.4s\n" - "ldr s22, [x15, x27]\n" - "fmla v1.4s, v28.4s, v4.4s\n" - "ldr x16, [%[inptrs], 264]\n" - "fmla v11.4s, v28.4s, v8.4s\n" - "ldr x15, [%[inptrs], 224]\n" - "fmla v21.4s, v28.4s, v7.4s\n" - "fmla v24.4s, v28.4s, v9.4s\n" - "fmla v14.4s, v29.4s, v3.4s\n" - "ldr s27, [x7, x27]\n" - "fmla v1.4s, v25.4s, v6.4s\n" - "ldr x7, [%[inptrs], 184]\n" - "fmla v10.4s, v29.4s, v4.4s\n" - "fmla v20.4s, v29.4s, v5.4s\n" - "fmla v21.4s, v25.4s, v8.4s\n" - "ldr s26, [x24, x27]\n" - "fmla v12.4s, v22.4s, v3.4s\n" - "ldr s25, [x16, x27]\n" - "fmla v11.4s, v22.4s, v4.4s\n" - "ldr x16, [%[inptrs], 272]\n" - "fmla v10.4s, v22.4s, v6.4s\n" - "fmla v20.4s, v22.4s, v7.4s\n" - "fmla v24.4s, v22.4s, v5.4s\n" - "fmla v23.4s, v22.4s, v19.4s\n" - "fmla v15.4s, v27.4s, v3.4s\n" - "ldr s31, [x15, x27]\n" - "fmla v11.4s, v27.4s, v6.4s\n" - "ldr s22, [x7, x27]\n" - "fmla v21.4s, v27.4s, v4.4s\n" - "ldr x15, [%[inptrs], 232]\n" - "fmla v20.4s, v27.4s, v8.4s\n" - "fmla v24.4s, v27.4s, v7.4s\n" - "fmla v23.4s, v27.4s, v9.4s\n" - "ldr s19, [x16, x27]\n" - "fmla v1.4s, v26.4s, v3.4s\n" - "ldr s28, [x15, x27]\n" - "fmla v21.4s, v26.4s, v6.4s\n" 
- "ldr x16, [%[inptrs], 280]\n" - "fmla v24.4s, v26.4s, v8.4s\n" - "fmla v10.4s, v25.4s, v3.4s\n" - "fmla v20.4s, v25.4s, v4.4s\n" - "ldr s30, [x16, x27]\n" - "fmla v23.4s, v25.4s, v5.4s\n" - "add x27, x27, #4\n" - "fmla v11.4s, v31.4s, v3.4s\n" - "fmla v21.4s, v22.4s, v3.4s\n" - "fmla v24.4s, v31.4s, v4.4s\n" - "movi v29.16b, #0\n" - "fmla v20.4s, v31.4s, v6.4s\n" - "fmla v23.4s, v31.4s, v7.4s\n" - "fmax v2.4s, v2.4s, v29.4s\n" - "fmax v18.4s, v18.4s, v29.4s\n" - "fmla v24.4s, v22.4s, v6.4s\n" - "fmax v17.4s, v17.4s, v29.4s\n" - "fmla v20.4s, v19.4s, v3.4s\n" - "fmax v1.4s, v1.4s, v29.4s\n" - "str s2, [x20, x28]\n" - "fmla v23.4s, v22.4s, v8.4s\n" - "fmax v16.4s, v16.4s, v29.4s\n" - "ldr x20, [%[outptrs], 8]\n" - "fmla v24.4s, v28.4s, v3.4s\n" - "fmax v0.4s, v0.4s, v29.4s\n" - "str s18, [x20, x28]\n" - "fmax v15.4s, v15.4s, v29.4s\n" - "str s16, [x21, x28]\n" - "fmla v23.4s, v19.4s, v4.4s\n" - "fmax v21.4s, v21.4s, v29.4s\n" - "ldr x20, [%[outptrs], 16]\n" - "fmax v13.4s, v13.4s, v29.4s\n" - "ldr x21, [%[outptrs], 40]\n" - "str s17, [x20, x28]\n" - "fmax v12.4s, v12.4s, v29.4s\n" - "str s0, [x21, x28]\n" - "fmla v23.4s, v28.4s, v6.4s\n" - "str s13, [x22, x28]\n" - "fmax v11.4s, v11.4s, v29.4s\n" - "fmax v24.4s, v24.4s, v29.4s\n" - "ldr x20, [%[outptrs], 24]\n" - "fmax v14.4s, v14.4s, v29.4s\n" - "ldr x21, [%[outptrs], 48]\n" - "str s1, [x20, x28]\n" - "fmla v23.4s, v30.4s, v3.4s\n" - "str s15, [x21, x28]\n" - "fmax v10.4s, v10.4s, v29.4s\n" - "str s14, [x23, x28]\n" - "fmax v20.4s, v20.4s, v29.4s\n" - "ldr x21, [%[outptrs], 56]\n" - "ldr x22, [%[outptrs], 72]\n" - "ldr x23, [%[outptrs], 104]\n" - "fmax v23.4s, v23.4s, v29.4s\n" - "str s21, [x21, x28]\n" - "str s12, [x22, x28]\n" - "str s10, [x23, x28]\n" - "ldr x22, [%[outptrs], 80]\n" - "ldr x23, [%[outptrs], 112]\n" - "str s11, [x22, x28]\n" - "str s20, [x23, x28]\n" - "ldr x22, [%[outptrs], 88]\n" - "ldr x23, [%[outptrs], 120]\n" - "str s24, [x22, x28]\n" - "str s23, [x23, x28]\n" - "add x28, x28, #4\n" - 
"7:\n" - : [wbptr] "+r" (weight_bias_ptr) - : [n_channels] "r" ((long) n_channels), [outptrs] "r" (outptrs), [inptrs] "r" (inptrs) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", "v9", "x15", "x16", "x17", "x7", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" - ); -} - -template <> -template <> -void Conv::execute_tile<ActivationFunction::ReLU6>( - int n_channels, - const void *weight_bias_ptr, - const float *input, - const unsigned int input_row_stride, - const unsigned int input_col_stride, - float *output, - const unsigned int output_row_stride, - const unsigned int output_col_stride -) -{ - __asm __volatile( - "add x24, %[inptr0], %[input_row_stride]\n" - "add x13, %[input_col_stride1], %[input_col_stride1]\n" - "add x8, %[outptr0], %[output_row_stride]\n" - "add x9, x24, %[input_row_stride]\n" - "add x10, x13, #64\n" - "add x19, x13, %[input_col_stride1]\n" - "add x20, x9, %[input_row_stride]\n" - "add x21, x19, #64\n" - "add x17, x19, %[input_col_stride1]\n" - "add x22, x20, %[input_row_stride]\n" - "add x7, x17, #64\n" - "add x11, x17, %[input_col_stride1]\n" - "add x23, x22, %[input_row_stride]\n" - "add x12, x11, #64\n" - "add x25, x8, %[output_row_stride]\n" - "add x26, x25, %[output_row_stride]\n" - "add x27, %[output_col_stride1], %[output_col_stride1]\n" - "and x14, %[n_channels], #3\n" - "add x28, x27, %[output_col_stride1]\n" - "lsr x15, %[n_channels], #2\n" - "cbz x15, 4f\n" - "1:\n" - "ldr q23, [%[wbptr]]\n" - "subs x15, x15, #1\n" - "mov v12.16b, v23.16b\n" - "ldr q20, [%[wbptr], #16]\n" - "mov v8.16b, v23.16b\n" - "ldr q6, [%[wbptr], #32]\n" - "mov v11.16b, v23.16b\n" - "ldr q5, [%[wbptr], #48]\n" - "mov v16.16b, v23.16b\n" - "ldr q19, [%[wbptr], #64]\n" - "mov v7.16b, v23.16b\n" - "ldr q4, [%[wbptr], #80]\n" - "mov v10.16b, v23.16b\n" - "ldr q3, 
[%[wbptr], #96]\n" - "mov v14.16b, v23.16b\n" - "ldr q2, [%[wbptr], #112]\n" - "mov v15.16b, v23.16b\n" - "ldr q1, [%[wbptr], #128]\n" - "mov v17.16b, v23.16b\n" - "ldr q0, [%[wbptr], #144]\n" - "mov v9.16b, v23.16b\n" - "ldr q28, [%[inptr0]]\n" - "fmla v12.4s, v28.4s, v20.4s\n" - "ldr q25, [x24]\n" - "fmla v8.4s, v25.4s, v20.4s\n" - "ldr q18, [%[inptr0], %[input_col_stride1]]\n" - "fmla v11.4s, v18.4s, v20.4s\n" - "ldr q30, [x9]\n" - "fmla v12.4s, v25.4s, v19.4s\n" - "ldr q29, [x24, %[input_col_stride1]]\n" - "fmla v8.4s, v30.4s, v19.4s\n" - "ldr q24, [%[inptr0], x13]\n" - "fmla v16.4s, v30.4s, v20.4s\n" - "ldr q27, [x20]\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "ldr q22, [x9, %[input_col_stride1]]\n" - "fmla v8.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x24, #64]\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v12.4s, v30.4s, v2.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "prfm pldl1keep, [x24, x16]\n" - "prfm pldl1keep, [%[inptr0], x10]\n" - "prfm pldl1keep, [x20, #64]\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v12.4s, v29.4s, v4.4s\n" - "beq 3f\n" - "2:\n" - "mov v13.16b, v23.16b\n" - "ldr q21, [x24, x13]\n" - "mov v18.16b, v23.16b\n" - "prfm pldl1keep, [x24, x10]\n" - "fmla v11.4s, v29.4s, v19.4s\n" - "prfm pldl1keep, [%[inptr0], x21]\n" - "fmla v7.4s, v29.4s, v20.4s\n" - "ldr q25, [%[inptr0], x19]\n" - "fmla v12.4s, v24.4s, v5.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v11.4s, v24.4s, v6.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v10.4s, v24.4s, v20.4s\n" - "ldr q24, [x22]\n" - "fmla v8.4s, v27.4s, v2.4s\n" - "prfm pldl1keep, [x9, x10]\n" - "fmla v16.4s, v27.4s, v19.4s\n" - "prfm pldl1keep, [x24, x21]\n" - "fmla v14.4s, v27.4s, v20.4s\n" - "ldr q26, [x20, %[input_col_stride1]]\n" - "fmla v12.4s, v22.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v8.4s, v22.4s, v4.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v11.4s, v22.4s, v2.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "prfm 
pldl1keep, [x20, x10]\n" - "fmla v7.4s, v22.4s, v19.4s\n" - "prfm pldl1keep, [x9, x21]\n" - "fmla v15.4s, v22.4s, v20.4s\n" - "ldr q30, [x9, x13]\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "prfm pldl1keep, [x24, x7]\n" - "fmla v8.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [%[inptr0], x12]\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v7.4s, v21.4s, v6.4s\n" - "prfm pldl1keep, [x22, x10]\n" - "fmla v10.4s, v21.4s, v19.4s\n" - "prfm pldl1keep, [x20, x21]\n" - "fmla v17.4s, v21.4s, v20.4s\n" - "ldr q22, [x24, x19]\n" - "fmla v11.4s, v25.4s, v5.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v10.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x24, x12]\n" - "fmla v9.4s, v25.4s, v20.4s\n" - "ldr q21, [%[inptr0], x17]\n" - "fmla v16.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x10]\n" - "fmla v14.4s, v24.4s, v19.4s\n" - "ldr q24, [x23]\n" - "fmla v8.4s, v26.4s, v1.4s\n" - "prfm pldl1keep, [x22, x21]\n" - "fmla v16.4s, v26.4s, v4.4s\n" - "prfm pldl1keep, [x20, x7]\n" - "fmla v7.4s, v26.4s, v2.4s\n" - "prfm pldl1keep, [x9, x12]\n" - "fmla v14.4s, v26.4s, v6.4s\n" - "prfm pldl1keep, [x23, x21]\n" - "fmla v15.4s, v26.4s, v19.4s\n" - "prfm pldl1keep, [x22, x7]\n" - "fmla v13.4s, v26.4s, v20.4s\n" - "ldr q26, [x22, %[input_col_stride1]]\n" - "fmla v12.4s, v30.4s, v0.4s\n" - "prfm pldl1keep, [x20, x12]\n" - "fmla v8.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x23, x7]\n" - "fmla v11.4s, v30.4s, v1.4s\n" - "prfm pldl1keep, [x22, x12]\n" - "fmla v16.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x23, x12]\n" - "fmla v7.4s, v30.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v10.4s, v30.4s, v2.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "subs x15, x15, #1\n" - "fmla v17.4s, v30.4s, v19.4s\n" - "fmla v18.4s, v30.4s, v20.4s\n" - "mov v25.16b, v23.16b\n" - "fmla v11.4s, v22.4s, v3.4s\n" - "fmla v7.4s, v22.4s, v5.4s\n" - "fmla v10.4s, v22.4s, v4.4s\n" - "fmla v17.4s, v22.4s, v6.4s\n" - "fmla v9.4s, v22.4s, v19.4s\n" - "fmla v25.4s, 
v22.4s, v20.4s\n" - "ldr q27, [x20, x13]\n" - "fmla v10.4s, v21.4s, v5.4s\n" - "fmla v14.4s, v24.4s, v2.4s\n" - "mov v22.16b, v23.16b\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "mov v24.16b, v23.16b\n" - "mov v21.16b, v23.16b\n" - "fmla v16.4s, v26.4s, v1.4s\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "fmla v13.4s, v26.4s, v19.4s\n" - "fmla v8.4s, v27.4s, v0.4s\n" - "ldr q28, [x9, x19]\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "fmla v7.4s, v27.4s, v1.4s\n" - "fmla v14.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v18.4s, v27.4s, v19.4s\n" - "fmla v22.4s, v27.4s, v20.4s\n" - "fmla v11.4s, v28.4s, v0.4s\n" - "ldr q29, [x24, x17]\n" - "fmla v7.4s, v28.4s, v3.4s\n" - "fmla v10.4s, v28.4s, v1.4s\n" - "fmla v15.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v28.4s, v4.4s\n" - "fmla v9.4s, v28.4s, v2.4s\n" - "fmla v18.4s, v28.4s, v6.4s\n" - "fmla v25.4s, v28.4s, v19.4s\n" - "fmla v24.4s, v28.4s, v20.4s\n" - "fmla v10.4s, v29.4s, v3.4s\n" - "ldr q23, [%[inptr0], x11]\n" - "fmla v17.4s, v29.4s, v5.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v9.4s, v29.4s, v4.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v25.4s, v29.4s, v6.4s\n" - "ldr q30, [x23, %[input_col_stride1]]\n" - "fmla v14.4s, v30.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v9.4s, v23.4s, v5.4s\n" - "ldr q23, [x22, x13]\n" - "fmla v13.4s, v30.4s, v2.4s\n" - "ldr q29, [x20, x19]\n" - "fmla v16.4s, v23.4s, v0.4s\n" - "prfm pldl1keep, [%[inptr0], x10]\n" - "fmla v14.4s, v23.4s, v3.4s\n" - "fmla v15.4s, v23.4s, v1.4s\n" - "fmla v13.4s, v23.4s, v4.4s\n" - "fmla v18.4s, v23.4s, v2.4s\n" - "fmla v22.4s, v23.4s, v19.4s\n" - "ldr q23, [x9, x17]\n" - "fmla v7.4s, v29.4s, v0.4s\n" - "fmla v15.4s, v29.4s, v3.4s\n" - "fmla v17.4s, v29.4s, v1.4s\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "fmla v18.4s, v29.4s, v4.4s\n" - "fmla v25.4s, v29.4s, v2.4s\n" - "fmla v22.4s, v29.4s, v6.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - 
"fmla v21.4s, v29.4s, v20.4s\n" - "ldr q26, [x24, x11]\n" - "fmla v10.4s, v23.4s, v0.4s\n" - "ldr q28, [x23, x13]\n" - "fmla v17.4s, v23.4s, v3.4s\n" - "add x24, x24, #16\n" - "fmla v9.4s, v23.4s, v1.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v18.4s, v23.4s, v5.4s\n" - "prfm pldl1keep, [x24, x16]\n" - "fmla v25.4s, v23.4s, v4.4s\n" - "fmla v24.4s, v23.4s, v6.4s\n" - "fmla v9.4s, v26.4s, v3.4s\n" - "ldr q20, [x22, x19]\n" - "fmla v14.4s, v28.4s, v0.4s\n" - "fmla v13.4s, v28.4s, v1.4s\n" - "fmla v25.4s, v26.4s, v5.4s\n" - "ldr q26, [x20, x17]\n" - "fmla v22.4s, v28.4s, v2.4s\n" - "ldr q23, [x9, x11]\n" - "fmla v15.4s, v20.4s, v0.4s\n" - "add x9, x9, #16\n" - "fmla v13.4s, v20.4s, v3.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "fmla v18.4s, v20.4s, v1.4s\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v22.4s, v20.4s, v4.4s\n" - "fmla v24.4s, v20.4s, v2.4s\n" - "fmla v21.4s, v20.4s, v19.4s\n" - "ldr q27, [x23, x19]\n" - "fmla v17.4s, v26.4s, v0.4s\n" - "ldr q20, [x22, x17]\n" - "fmla v18.4s, v26.4s, v3.4s\n" - "fmla v25.4s, v26.4s, v1.4s\n" - "fmla v22.4s, v26.4s, v5.4s\n" - "fmla v24.4s, v26.4s, v4.4s\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr q19, [x20, x11]\n" - "fmla v9.4s, v23.4s, v0.4s\n" - "ldr q28, [x23, x17]\n" - "fmla v25.4s, v23.4s, v3.4s\n" - "add x20, x20, #16\n" - "fmla v24.4s, v23.4s, v5.4s\n" - "ldr q29, [x22, x11]\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "prfm pldl1keep, [x20, #64]\n" - "fmla v22.4s, v27.4s, v1.4s\n" - "add x22, x22, #16\n" - "fmla v21.4s, v27.4s, v2.4s\n" - "ldr q30, [x23, x11]\n" - "fmla v18.4s, v20.4s, v0.4s\n" - "ldr q23, [%[wbptr]]\n" - "fmla v22.4s, v20.4s, v3.4s\n" - "add x23, x23, #16\n" - "fmla v24.4s, v20.4s, v1.4s\n" - "fmla v21.4s, v20.4s, v4.4s\n" - "fmla v25.4s, v19.4s, v0.4s\n" - "ldr q20, [%[wbptr], #16]\n" - "fmla v22.4s, v28.4s, v0.4s\n" - "ldr q6, [%[wbptr], #32]\n" - "fmla v21.4s, v19.4s, v5.4s\n" - "movi v26.16b, #0\n" - "fmla v24.4s, v19.4s, v3.4s\n" - "ldr q19, [%[wbptr], #64]\n" - "fmax v12.4s, v12.4s, v26.4s\n" - "fmax 
v11.4s, v11.4s, v26.4s\n" - "fmla v21.4s, v28.4s, v1.4s\n" - "ldr q5, [%[wbptr], #48]\n" - "fmla v24.4s, v29.4s, v0.4s\n" - "ldr q4, [%[wbptr], #80]\n" - "fmax v10.4s, v10.4s, v26.4s\n" - "fmax v9.4s, v9.4s, v26.4s\n" - "fmla v21.4s, v29.4s, v3.4s\n" - "ldr q2, [%[wbptr], #112]\n" - "fmov v27.4s, #6.0\n" - "fmax v8.4s, v8.4s, v26.4s\n" - "fmax v7.4s, v7.4s, v26.4s\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "fmla v21.4s, v30.4s, v0.4s\n" - "ldr q3, [%[wbptr], #96]\n" - "fmin v12.4s, v12.4s, v27.4s\n" - "ldr q1, [%[wbptr], #128]\n" - "fmin v11.4s, v11.4s, v27.4s\n" - "fmin v10.4s, v10.4s, v27.4s\n" - "str q12, [%[outptr0]]\n" - "fmin v9.4s, v9.4s, v27.4s\n" - "str q11, [%[outptr0], %[output_col_stride1]]\n" - "fmin v8.4s, v8.4s, v27.4s\n" - "str q10, [%[outptr0], x27]\n" - "fmin v7.4s, v7.4s, v27.4s\n" - "str q9, [%[outptr0], x28]\n" - "fmin v17.4s, v17.4s, v27.4s\n" - "str q8, [x8]\n" - "fmax v25.4s, v25.4s, v26.4s\n" - "str q7, [x8, %[output_col_stride1]]\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "str q17, [x8, x27]\n" - "fmin v25.4s, v25.4s, v27.4s\n" - "fmin v16.4s, v16.4s, v27.4s\n" - "ldr q0, [%[wbptr], #144]\n" - "str q25, [x8, x28]\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "str q16, [x25]\n" - "fmax v18.4s, v18.4s, v26.4s\n" - "fmin v15.4s, v15.4s, v27.4s\n" - "ldr q28, [%[inptr0]]\n" - "fmin v18.4s, v18.4s, v27.4s\n" - "ldr q25, [x24]\n" - "str q15, [x25, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v26.4s\n" - "str q18, [x25, x27]\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "fmin v24.4s, v24.4s, v27.4s\n" - "ldr q18, [%[inptr0], %[input_col_stride1]]\n" - "fmin v14.4s, v14.4s, v27.4s\n" - "ldr q30, [x9]\n" - "str q24, [x25, x28]\n" - "fmax v13.4s, v13.4s, v26.4s\n" - "str q14, [x26]\n" - "fmax v22.4s, v22.4s, v26.4s\n" - "fmin v13.4s, v13.4s, v27.4s\n" - "ldr q29, [x24, %[input_col_stride1]]\n" - "fmin v22.4s, v22.4s, v27.4s\n" - "ldr q24, [%[inptr0], x13]\n" - "str q13, [x26, %[output_col_stride1]]\n" - "fmax v21.4s, v21.4s, v26.4s\n" - "str q22, [x26, x27]\n" - 
"mov v12.16b, v23.16b\n" - "fmin v21.4s, v21.4s, v27.4s\n" - "ldr q27, [x20]\n" - "mov v8.16b, v23.16b\n" - "ldr q22, [x9, %[input_col_stride1]]\n" - "str q21, [x26, x28]\n" - "mov v11.16b, v23.16b\n" - "mov v16.16b, v23.16b\n" - "add %[outptr0], %[outptr0], #16\n" - "mov v7.16b, v23.16b\n" - "add x8, x8, #16\n" - "mov v10.16b, v23.16b\n" - "add x25, x25, #16\n" - "mov v14.16b, v23.16b\n" - "add x26, x26, #16\n" - "mov v15.16b, v23.16b\n" - "mov v17.16b, v23.16b\n" - "mov v9.16b, v23.16b\n" - "fmla v12.4s, v28.4s, v20.4s\n" - "fmla v8.4s, v25.4s, v20.4s\n" - "fmla v11.4s, v18.4s, v20.4s\n" - "fmla v16.4s, v30.4s, v20.4s\n" - "fmla v12.4s, v25.4s, v19.4s\n" - "fmla v8.4s, v30.4s, v19.4s\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "fmla v8.4s, v29.4s, v6.4s\n" - "fmla v12.4s, v30.4s, v2.4s\n" - "fmla v12.4s, v29.4s, v4.4s\n" - "bne 2b\n" - "3:\n" - "mov v13.16b, v23.16b\n" - "ldr q21, [x24, x13]\n" - "mov v18.16b, v23.16b\n" - "prfm pldl1keep, [x24, x10]\n" - "fmla v11.4s, v29.4s, v19.4s\n" - "prfm pldl1keep, [%[inptr0], x21]\n" - "fmla v7.4s, v29.4s, v20.4s\n" - "ldr q25, [%[inptr0], x19]\n" - "fmla v12.4s, v24.4s, v5.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v11.4s, v24.4s, v6.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v10.4s, v24.4s, v20.4s\n" - "ldr q24, [x22]\n" - "fmla v8.4s, v27.4s, v2.4s\n" - "prfm pldl1keep, [x9, x10]\n" - "fmla v16.4s, v27.4s, v19.4s\n" - "prfm pldl1keep, [x24, x21]\n" - "fmla v14.4s, v27.4s, v20.4s\n" - "ldr q26, [x20, %[input_col_stride1]]\n" - "fmla v12.4s, v22.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v8.4s, v22.4s, v4.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v11.4s, v22.4s, v2.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "prfm pldl1keep, [x20, x10]\n" - "fmla v7.4s, v22.4s, v19.4s\n" - "prfm pldl1keep, [x9, x21]\n" - "fmla v15.4s, v22.4s, v20.4s\n" - "ldr q30, [x9, x13]\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "prfm pldl1keep, [x24, x7]\n" - "fmla v8.4s, v21.4s, v5.4s\n" - "prfm 
pldl1keep, [%[inptr0], x12]\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v7.4s, v21.4s, v6.4s\n" - "prfm pldl1keep, [x22, x10]\n" - "fmla v10.4s, v21.4s, v19.4s\n" - "prfm pldl1keep, [x20, x21]\n" - "fmla v17.4s, v21.4s, v20.4s\n" - "ldr q22, [x24, x19]\n" - "fmla v11.4s, v25.4s, v5.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v10.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x24, x12]\n" - "fmla v9.4s, v25.4s, v20.4s\n" - "ldr q21, [%[inptr0], x17]\n" - "fmla v16.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x10]\n" - "fmla v14.4s, v24.4s, v19.4s\n" - "ldr q24, [x23]\n" - "fmla v8.4s, v26.4s, v1.4s\n" - "prfm pldl1keep, [x22, x21]\n" - "fmla v16.4s, v26.4s, v4.4s\n" - "prfm pldl1keep, [x20, x7]\n" - "fmla v7.4s, v26.4s, v2.4s\n" - "prfm pldl1keep, [x9, x12]\n" - "fmla v14.4s, v26.4s, v6.4s\n" - "prfm pldl1keep, [x23, x21]\n" - "fmla v15.4s, v26.4s, v19.4s\n" - "prfm pldl1keep, [x22, x7]\n" - "fmla v13.4s, v26.4s, v20.4s\n" - "ldr q26, [x22, %[input_col_stride1]]\n" - "fmla v12.4s, v30.4s, v0.4s\n" - "prfm pldl1keep, [x20, x12]\n" - "fmla v8.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x23, x7]\n" - "fmla v11.4s, v30.4s, v1.4s\n" - "prfm pldl1keep, [x22, x12]\n" - "fmla v16.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x23, x12]\n" - "fmla v7.4s, v30.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #160\n" - "fmla v10.4s, v30.4s, v2.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "fmla v17.4s, v30.4s, v19.4s\n" - "fmla v18.4s, v30.4s, v20.4s\n" - "ldr q27, [x20, x13]\n" - "fmla v11.4s, v22.4s, v3.4s\n" - "fmla v7.4s, v22.4s, v5.4s\n" - "fmla v10.4s, v22.4s, v4.4s\n" - "fmla v17.4s, v22.4s, v6.4s\n" - "fmla v9.4s, v22.4s, v19.4s\n" - "fmla v14.4s, v24.4s, v2.4s\n" - "mov v25.16b, v23.16b\n" - "fmla v16.4s, v26.4s, v1.4s\n" - "fmla v10.4s, v21.4s, v5.4s\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "fmla v25.4s, v22.4s, v20.4s\n" - "ldr q28, [x9, x19]\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "ldr q29, [x24, x17]\n" - "fmla v14.4s, v26.4s, 
v4.4s\n" - "fmla v13.4s, v26.4s, v19.4s\n" - "mov v22.16b, v23.16b\n" - "fmla v8.4s, v27.4s, v0.4s\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "fmla v7.4s, v27.4s, v1.4s\n" - "fmla v14.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v18.4s, v27.4s, v19.4s\n" - "fmla v22.4s, v27.4s, v20.4s\n" - "mov v24.16b, v23.16b\n" - "mov v21.16b, v23.16b\n" - "fmla v11.4s, v28.4s, v0.4s\n" - "fmla v7.4s, v28.4s, v3.4s\n" - "fmla v10.4s, v28.4s, v1.4s\n" - "fmla v15.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v28.4s, v4.4s\n" - "fmla v9.4s, v28.4s, v2.4s\n" - "fmla v18.4s, v28.4s, v6.4s\n" - "fmla v25.4s, v28.4s, v19.4s\n" - "fmla v24.4s, v28.4s, v20.4s\n" - "ldr q23, [%[inptr0], x11]\n" - "fmla v10.4s, v29.4s, v3.4s\n" - "add %[inptr0], %[inptr0], #16\n" - "fmla v17.4s, v29.4s, v5.4s\n" - "fmla v9.4s, v29.4s, v4.4s\n" - "fmla v25.4s, v29.4s, v6.4s\n" - "ldr q30, [x23, %[input_col_stride1]]\n" - "fmla v14.4s, v30.4s, v1.4s\n" - "fmla v13.4s, v30.4s, v2.4s\n" - "fmla v9.4s, v23.4s, v5.4s\n" - "ldr q23, [x22, x13]\n" - "fmla v16.4s, v23.4s, v0.4s\n" - "ldr q29, [x20, x19]\n" - "fmla v14.4s, v23.4s, v3.4s\n" - "fmla v15.4s, v23.4s, v1.4s\n" - "fmla v13.4s, v23.4s, v4.4s\n" - "fmla v18.4s, v23.4s, v2.4s\n" - "fmla v22.4s, v23.4s, v19.4s\n" - "ldr q23, [x9, x17]\n" - "fmla v7.4s, v29.4s, v0.4s\n" - "fmla v15.4s, v29.4s, v3.4s\n" - "fmla v17.4s, v29.4s, v1.4s\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "fmla v18.4s, v29.4s, v4.4s\n" - "fmla v25.4s, v29.4s, v2.4s\n" - "fmla v22.4s, v29.4s, v6.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v21.4s, v29.4s, v20.4s\n" - "ldr q26, [x24, x11]\n" - "fmla v10.4s, v23.4s, v0.4s\n" - "ldr q28, [x23, x13]\n" - "fmla v17.4s, v23.4s, v3.4s\n" - "add x24, x24, #16\n" - "fmla v9.4s, v23.4s, v1.4s\n" - "fmla v18.4s, v23.4s, v5.4s\n" - "fmla v25.4s, v23.4s, v4.4s\n" - "fmla v24.4s, v23.4s, v6.4s\n" - "fmla v14.4s, v28.4s, v0.4s\n" - "ldr q20, [x22, x19]\n" - "fmla v9.4s, v26.4s, v3.4s\n" - 
"fmla v13.4s, v28.4s, v1.4s\n" - "fmla v25.4s, v26.4s, v5.4s\n" - "ldr q26, [x20, x17]\n" - "fmla v22.4s, v28.4s, v2.4s\n" - "ldr q23, [x9, x11]\n" - "fmla v15.4s, v20.4s, v0.4s\n" - "add x9, x9, #16\n" - "fmla v13.4s, v20.4s, v3.4s\n" - "fmla v18.4s, v20.4s, v1.4s\n" - "fmla v22.4s, v20.4s, v4.4s\n" - "fmla v24.4s, v20.4s, v2.4s\n" - "fmla v21.4s, v20.4s, v19.4s\n" - "ldr q27, [x23, x19]\n" - "fmla v17.4s, v26.4s, v0.4s\n" - "ldr q20, [x22, x17]\n" - "fmla v18.4s, v26.4s, v3.4s\n" - "fmla v25.4s, v26.4s, v1.4s\n" - "fmla v22.4s, v26.4s, v5.4s\n" - "fmla v24.4s, v26.4s, v4.4s\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr q19, [x20, x11]\n" - "fmla v9.4s, v23.4s, v0.4s\n" - "ldr q28, [x23, x17]\n" - "fmla v25.4s, v23.4s, v3.4s\n" - "add x20, x20, #16\n" - "fmla v24.4s, v23.4s, v5.4s\n" - "ldr q29, [x22, x11]\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "add x22, x22, #16\n" - "fmla v22.4s, v27.4s, v1.4s\n" - "fmla v21.4s, v27.4s, v2.4s\n" - "fmla v18.4s, v20.4s, v0.4s\n" - "ldr q30, [x23, x11]\n" - "fmla v24.4s, v20.4s, v1.4s\n" - "add x23, x23, #16\n" - "fmla v22.4s, v20.4s, v3.4s\n" - "fmla v21.4s, v20.4s, v4.4s\n" - "fmla v25.4s, v19.4s, v0.4s\n" - "movi v26.16b, #0\n" - "fmla v24.4s, v19.4s, v3.4s\n" - "fmov v27.4s, #6.0\n" - "fmla v21.4s, v19.4s, v5.4s\n" - "fmla v22.4s, v28.4s, v0.4s\n" - "fmax v12.4s, v12.4s, v26.4s\n" - "fmax v11.4s, v11.4s, v26.4s\n" - "fmla v24.4s, v29.4s, v0.4s\n" - "fmax v10.4s, v10.4s, v26.4s\n" - "fmla v21.4s, v28.4s, v1.4s\n" - "fmin v12.4s, v12.4s, v27.4s\n" - "fmin v11.4s, v11.4s, v27.4s\n" - "fmin v10.4s, v10.4s, v27.4s\n" - "str q12, [%[outptr0]]\n" - "fmax v9.4s, v9.4s, v26.4s\n" - "str q11, [%[outptr0], %[output_col_stride1]]\n" - "fmla v21.4s, v29.4s, v3.4s\n" - "str q10, [%[outptr0], x27]\n" - "fmin v9.4s, v9.4s, v27.4s\n" - "fmax v8.4s, v8.4s, v26.4s\n" - "fmax v7.4s, v7.4s, v26.4s\n" - "str q9, [%[outptr0], x28]\n" - "fmla v21.4s, v30.4s, v0.4s\n" - "fmin v8.4s, v8.4s, v27.4s\n" - "add %[outptr0], %[outptr0], #16\n" - "fmin v7.4s, 
v7.4s, v27.4s\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "str q8, [x8]\n" - "fmax v25.4s, v25.4s, v26.4s\n" - "str q7, [x8, %[output_col_stride1]]\n" - "fmin v17.4s, v17.4s, v27.4s\n" - "fmin v25.4s, v25.4s, v27.4s\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "str q17, [x8, x27]\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "str q25, [x8, x28]\n" - "fmin v16.4s, v16.4s, v27.4s\n" - "fmin v15.4s, v15.4s, v27.4s\n" - "add x8, x8, #16\n" - "str q16, [x25]\n" - "fmax v18.4s, v18.4s, v26.4s\n" - "str q15, [x25, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v26.4s\n" - "fmin v18.4s, v18.4s, v27.4s\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "fmin v24.4s, v24.4s, v27.4s\n" - "fmax v13.4s, v13.4s, v26.4s\n" - "str q18, [x25, x27]\n" - "fmin v14.4s, v14.4s, v27.4s\n" - "str q24, [x25, x28]\n" - "fmin v13.4s, v13.4s, v27.4s\n" - "str q14, [x26]\n" - "fmax v22.4s, v22.4s, v26.4s\n" - "str q13, [x26, %[output_col_stride1]]\n" - "fmax v21.4s, v21.4s, v26.4s\n" - "fmin v22.4s, v22.4s, v27.4s\n" - "add x25, x25, #16\n" - "fmin v21.4s, v21.4s, v27.4s\n" - "str q22, [x26, x27]\n" - "str q21, [x26, x28]\n" - "add x26, x26, #16\n" - "4:\n" - "cbz x14, 7f\n" - "ldr s23, [%[wbptr]]\n" - "mov v12.16b, v23.16b\n" - "ldr s20, [%[wbptr], #4]\n" - "mov v8.16b, v23.16b\n" - "ldr s6, [%[wbptr], #8]\n" - "mov v11.16b, v23.16b\n" - "ldr s5, [%[wbptr], #12]\n" - "mov v16.16b, v23.16b\n" - "ldr s19, [%[wbptr], #16]\n" - "mov v7.16b, v23.16b\n" - "ldr s4, [%[wbptr], #20]\n" - "mov v10.16b, v23.16b\n" - "ldr s3, [%[wbptr], #24]\n" - "mov v14.16b, v23.16b\n" - "ldr s2, [%[wbptr], #28]\n" - "mov v15.16b, v23.16b\n" - "ldr s1, [%[wbptr], #32]\n" - "mov v17.16b, v23.16b\n" - "ldr s0, [%[wbptr], #36]\n" - "mov v9.16b, v23.16b\n" - "ldr s28, [%[inptr0]]\n" - "fmla v12.4s, v28.4s, v20.4s\n" - "ldr s25, [x24]\n" - "fmla v8.4s, v25.4s, v20.4s\n" - "ldr s18, [%[inptr0], %[input_col_stride1]]\n" - "fmla v11.4s, v18.4s, v20.4s\n" - "ldr s30, [x9]\n" - "fmla v12.4s, v25.4s, v19.4s\n" - "ldr s29, [x24, %[input_col_stride1]]\n" - 
"fmla v8.4s, v30.4s, v19.4s\n" - "ldr s24, [%[inptr0], x13]\n" - "fmla v16.4s, v30.4s, v20.4s\n" - "ldr s27, [x20]\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "ldr s22, [x9, %[input_col_stride1]]\n" - "fmla v8.4s, v29.4s, v6.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "prfm pldl1keep, [x24, #64]\n" - "subs x14, x14, #1\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "prfm pldl1keep, [x9, #64]\n" - "fmla v12.4s, v30.4s, v2.4s\n" - "prfm pldl1keep, [x24, x16]\n" - "prfm pldl1keep, [%[inptr0], x10]\n" - "prfm pldl1keep, [x20, #64]\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v12.4s, v29.4s, v4.4s\n" - "beq 6f\n" - "5:\n" - "mov v13.16b, v23.16b\n" - "ldr s21, [x24, x13]\n" - "mov v18.16b, v23.16b\n" - "prfm pldl1keep, [x24, x10]\n" - "fmla v11.4s, v29.4s, v19.4s\n" - "prfm pldl1keep, [%[inptr0], x21]\n" - "fmla v7.4s, v29.4s, v20.4s\n" - "ldr s25, [%[inptr0], x19]\n" - "fmla v12.4s, v24.4s, v5.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v11.4s, v24.4s, v6.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v10.4s, v24.4s, v20.4s\n" - "ldr s24, [x22]\n" - "fmla v8.4s, v27.4s, v2.4s\n" - "prfm pldl1keep, [x9, x10]\n" - "fmla v16.4s, v27.4s, v19.4s\n" - "prfm pldl1keep, [x24, x21]\n" - "fmla v14.4s, v27.4s, v20.4s\n" - "ldr s26, [x20, %[input_col_stride1]]\n" - "fmla v12.4s, v22.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v8.4s, v22.4s, v4.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v11.4s, v22.4s, v2.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "prfm pldl1keep, [x20, x10]\n" - "fmla v7.4s, v22.4s, v19.4s\n" - "prfm pldl1keep, [x9, x21]\n" - "fmla v15.4s, v22.4s, v20.4s\n" - "ldr s30, [x9, x13]\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "prfm pldl1keep, [x24, x7]\n" - "fmla v8.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [%[inptr0], x12]\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v7.4s, v21.4s, v6.4s\n" - "prfm pldl1keep, [x22, x10]\n" - "fmla v10.4s, v21.4s, v19.4s\n" - "prfm pldl1keep, [x20, x21]\n" - "fmla 
v17.4s, v21.4s, v20.4s\n" - "ldr s22, [x24, x19]\n" - "fmla v11.4s, v25.4s, v5.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v10.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x24, x12]\n" - "fmla v9.4s, v25.4s, v20.4s\n" - "ldr s21, [%[inptr0], x17]\n" - "fmla v16.4s, v24.4s, v2.4s\n" - "prfm pldl1keep, [x23, x10]\n" - "fmla v14.4s, v24.4s, v19.4s\n" - "ldr s24, [x23]\n" - "fmla v8.4s, v26.4s, v1.4s\n" - "prfm pldl1keep, [x22, x21]\n" - "fmla v16.4s, v26.4s, v4.4s\n" - "prfm pldl1keep, [x20, x7]\n" - "fmla v7.4s, v26.4s, v2.4s\n" - "prfm pldl1keep, [x9, x12]\n" - "fmla v14.4s, v26.4s, v6.4s\n" - "prfm pldl1keep, [x23, x21]\n" - "fmla v15.4s, v26.4s, v19.4s\n" - "prfm pldl1keep, [x22, x7]\n" - "fmla v13.4s, v26.4s, v20.4s\n" - "ldr s26, [x22, %[input_col_stride1]]\n" - "fmla v12.4s, v30.4s, v0.4s\n" - "prfm pldl1keep, [x20, x12]\n" - "fmla v8.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x23, x7]\n" - "fmla v11.4s, v30.4s, v1.4s\n" - "prfm pldl1keep, [x22, x12]\n" - "fmla v16.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x23, x12]\n" - "fmla v7.4s, v30.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v10.4s, v30.4s, v2.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "subs x14, x14, #1\n" - "fmla v17.4s, v30.4s, v19.4s\n" - "fmla v18.4s, v30.4s, v20.4s\n" - "mov v25.16b, v23.16b\n" - "fmla v11.4s, v22.4s, v3.4s\n" - "fmla v7.4s, v22.4s, v5.4s\n" - "fmla v10.4s, v22.4s, v4.4s\n" - "fmla v17.4s, v22.4s, v6.4s\n" - "fmla v9.4s, v22.4s, v19.4s\n" - "fmla v25.4s, v22.4s, v20.4s\n" - "ldr s27, [x20, x13]\n" - "fmla v10.4s, v21.4s, v5.4s\n" - "fmla v14.4s, v24.4s, v2.4s\n" - "mov v22.16b, v23.16b\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "mov v24.16b, v23.16b\n" - "mov v21.16b, v23.16b\n" - "fmla v16.4s, v26.4s, v1.4s\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "fmla v13.4s, v26.4s, v19.4s\n" - "fmla v8.4s, v27.4s, v0.4s\n" - "ldr s28, [x9, x19]\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "fmla v7.4s, v27.4s, v1.4s\n" - "fmla v14.4s, 
v27.4s, v5.4s\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v18.4s, v27.4s, v19.4s\n" - "fmla v22.4s, v27.4s, v20.4s\n" - "fmla v11.4s, v28.4s, v0.4s\n" - "ldr s29, [x24, x17]\n" - "fmla v7.4s, v28.4s, v3.4s\n" - "fmla v10.4s, v28.4s, v1.4s\n" - "fmla v15.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v28.4s, v4.4s\n" - "fmla v9.4s, v28.4s, v2.4s\n" - "fmla v18.4s, v28.4s, v6.4s\n" - "fmla v25.4s, v28.4s, v19.4s\n" - "fmla v24.4s, v28.4s, v20.4s\n" - "fmla v10.4s, v29.4s, v3.4s\n" - "ldr s23, [%[inptr0], x11]\n" - "fmla v17.4s, v29.4s, v5.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v9.4s, v29.4s, v4.4s\n" - "prfm pldl1keep, [%[inptr0], #64]\n" - "fmla v25.4s, v29.4s, v6.4s\n" - "ldr s30, [x23, %[input_col_stride1]]\n" - "fmla v14.4s, v30.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], x16]\n" - "fmla v9.4s, v23.4s, v5.4s\n" - "ldr s23, [x22, x13]\n" - "fmla v13.4s, v30.4s, v2.4s\n" - "ldr s29, [x20, x19]\n" - "fmla v16.4s, v23.4s, v0.4s\n" - "prfm pldl1keep, [%[inptr0], x10]\n" - "fmla v14.4s, v23.4s, v3.4s\n" - "fmla v15.4s, v23.4s, v1.4s\n" - "fmla v13.4s, v23.4s, v4.4s\n" - "fmla v18.4s, v23.4s, v2.4s\n" - "fmla v22.4s, v23.4s, v19.4s\n" - "ldr s23, [x9, x17]\n" - "fmla v7.4s, v29.4s, v0.4s\n" - "fmla v15.4s, v29.4s, v3.4s\n" - "fmla v17.4s, v29.4s, v1.4s\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "fmla v18.4s, v29.4s, v4.4s\n" - "fmla v25.4s, v29.4s, v2.4s\n" - "fmla v22.4s, v29.4s, v6.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v21.4s, v29.4s, v20.4s\n" - "ldr s26, [x24, x11]\n" - "fmla v10.4s, v23.4s, v0.4s\n" - "ldr s28, [x23, x13]\n" - "fmla v17.4s, v23.4s, v3.4s\n" - "add x24, x24, #4\n" - "fmla v9.4s, v23.4s, v1.4s\n" - "prfm pldl1keep, [x24, #64]\n" - "fmla v18.4s, v23.4s, v5.4s\n" - "prfm pldl1keep, [x24, x16]\n" - "fmla v25.4s, v23.4s, v4.4s\n" - "fmla v24.4s, v23.4s, v6.4s\n" - "fmla v9.4s, v26.4s, v3.4s\n" - "ldr s20, [x22, x19]\n" - "fmla v14.4s, v28.4s, v0.4s\n" - "fmla v13.4s, v28.4s, v1.4s\n" 
- "fmla v25.4s, v26.4s, v5.4s\n" - "ldr s26, [x20, x17]\n" - "fmla v22.4s, v28.4s, v2.4s\n" - "ldr s23, [x9, x11]\n" - "fmla v15.4s, v20.4s, v0.4s\n" - "add x9, x9, #4\n" - "fmla v13.4s, v20.4s, v3.4s\n" - "prfm pldl1keep, [x9, #64]\n" - "fmla v18.4s, v20.4s, v1.4s\n" - "prfm pldl1keep, [x9, x16]\n" - "fmla v22.4s, v20.4s, v4.4s\n" - "fmla v24.4s, v20.4s, v2.4s\n" - "fmla v21.4s, v20.4s, v19.4s\n" - "ldr s27, [x23, x19]\n" - "fmla v17.4s, v26.4s, v0.4s\n" - "ldr s20, [x22, x17]\n" - "fmla v18.4s, v26.4s, v3.4s\n" - "fmla v25.4s, v26.4s, v1.4s\n" - "fmla v22.4s, v26.4s, v5.4s\n" - "fmla v24.4s, v26.4s, v4.4s\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr s19, [x20, x11]\n" - "fmla v9.4s, v23.4s, v0.4s\n" - "ldr s28, [x23, x17]\n" - "fmla v25.4s, v23.4s, v3.4s\n" - "add x20, x20, #4\n" - "fmla v24.4s, v23.4s, v5.4s\n" - "ldr s29, [x22, x11]\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "prfm pldl1keep, [x20, #64]\n" - "fmla v22.4s, v27.4s, v1.4s\n" - "add x22, x22, #4\n" - "fmla v21.4s, v27.4s, v2.4s\n" - "ldr s30, [x23, x11]\n" - "fmla v18.4s, v20.4s, v0.4s\n" - "ldr s23, [%[wbptr]]\n" - "fmla v22.4s, v20.4s, v3.4s\n" - "add x23, x23, #4\n" - "fmla v24.4s, v20.4s, v1.4s\n" - "fmla v21.4s, v20.4s, v4.4s\n" - "fmla v25.4s, v19.4s, v0.4s\n" - "ldr s20, [%[wbptr], #4]\n" - "fmla v22.4s, v28.4s, v0.4s\n" - "ldr s6, [%[wbptr], #8]\n" - "fmla v21.4s, v19.4s, v5.4s\n" - "movi v26.16b, #0\n" - "fmla v24.4s, v19.4s, v3.4s\n" - "ldr s19, [%[wbptr], #16]\n" - "fmax v12.4s, v12.4s, v26.4s\n" - "fmax v11.4s, v11.4s, v26.4s\n" - "fmla v21.4s, v28.4s, v1.4s\n" - "ldr s5, [%[wbptr], #12]\n" - "fmla v24.4s, v29.4s, v0.4s\n" - "ldr s4, [%[wbptr], #20]\n" - "fmax v10.4s, v10.4s, v26.4s\n" - "fmax v9.4s, v9.4s, v26.4s\n" - "fmla v21.4s, v29.4s, v3.4s\n" - "ldr s2, [%[wbptr], #28]\n" - "fmov v27.4s, #6.0\n" - "fmax v8.4s, v8.4s, v26.4s\n" - "fmax v7.4s, v7.4s, v26.4s\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "fmla v21.4s, v30.4s, v0.4s\n" - "ldr s3, [%[wbptr], #24]\n" - "fmin v12.4s, v12.4s, v27.4s\n" 
- "ldr s1, [%[wbptr], #32]\n" - "fmin v11.4s, v11.4s, v27.4s\n" - "fmin v10.4s, v10.4s, v27.4s\n" - "str s12, [%[outptr0]]\n" - "fmin v9.4s, v9.4s, v27.4s\n" - "str s11, [%[outptr0], %[output_col_stride1]]\n" - "fmin v8.4s, v8.4s, v27.4s\n" - "str s10, [%[outptr0], x27]\n" - "fmin v7.4s, v7.4s, v27.4s\n" - "str s9, [%[outptr0], x28]\n" - "fmin v17.4s, v17.4s, v27.4s\n" - "str s8, [x8]\n" - "fmax v25.4s, v25.4s, v26.4s\n" - "str s7, [x8, %[output_col_stride1]]\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "str s17, [x8, x27]\n" - "fmin v25.4s, v25.4s, v27.4s\n" - "fmin v16.4s, v16.4s, v27.4s\n" - "ldr s0, [%[wbptr], #36]\n" - "str s25, [x8, x28]\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "str s16, [x25]\n" - "fmax v18.4s, v18.4s, v26.4s\n" - "fmin v15.4s, v15.4s, v27.4s\n" - "ldr s28, [%[inptr0]]\n" - "fmin v18.4s, v18.4s, v27.4s\n" - "ldr s25, [x24]\n" - "str s15, [x25, %[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v26.4s\n" - "str s18, [x25, x27]\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "fmin v24.4s, v24.4s, v27.4s\n" - "ldr s18, [%[inptr0], %[input_col_stride1]]\n" - "fmin v14.4s, v14.4s, v27.4s\n" - "ldr s30, [x9]\n" - "str s24, [x25, x28]\n" - "fmax v13.4s, v13.4s, v26.4s\n" - "str s14, [x26]\n" - "fmax v22.4s, v22.4s, v26.4s\n" - "fmin v13.4s, v13.4s, v27.4s\n" - "ldr s29, [x24, %[input_col_stride1]]\n" - "fmin v22.4s, v22.4s, v27.4s\n" - "ldr s24, [%[inptr0], x13]\n" - "str s13, [x26, %[output_col_stride1]]\n" - "fmax v21.4s, v21.4s, v26.4s\n" - "str s22, [x26, x27]\n" - "mov v12.16b, v23.16b\n" - "fmin v21.4s, v21.4s, v27.4s\n" - "ldr s27, [x20]\n" - "mov v8.16b, v23.16b\n" - "ldr s22, [x9, %[input_col_stride1]]\n" - "str s21, [x26, x28]\n" - "mov v11.16b, v23.16b\n" - "mov v16.16b, v23.16b\n" - "add %[outptr0], %[outptr0], #4\n" - "mov v7.16b, v23.16b\n" - "add x8, x8, #4\n" - "mov v10.16b, v23.16b\n" - "add x25, x25, #4\n" - "mov v14.16b, v23.16b\n" - "add x26, x26, #4\n" - "mov v15.16b, v23.16b\n" - "mov v17.16b, v23.16b\n" - "mov v9.16b, v23.16b\n" - "fmla 
v12.4s, v28.4s, v20.4s\n" - "fmla v8.4s, v25.4s, v20.4s\n" - "fmla v11.4s, v18.4s, v20.4s\n" - "fmla v16.4s, v30.4s, v20.4s\n" - "fmla v12.4s, v25.4s, v19.4s\n" - "fmla v8.4s, v30.4s, v19.4s\n" - "fmla v12.4s, v18.4s, v6.4s\n" - "fmla v8.4s, v29.4s, v6.4s\n" - "fmla v12.4s, v30.4s, v2.4s\n" - "fmla v12.4s, v29.4s, v4.4s\n" - "bne 5b\n" - "6:\n" - "mov v13.16b, v23.16b\n" - "ldr s21, [x24, x13]\n" - "mov v18.16b, v23.16b\n" - "prfm pldl1keep, [x24, x10]\n" - "fmla v11.4s, v29.4s, v19.4s\n" - "prfm pldl1keep, [%[inptr0], x21]\n" - "fmla v7.4s, v29.4s, v20.4s\n" - "ldr s25, [%[inptr0], x19]\n" - "fmla v12.4s, v24.4s, v5.4s\n" - "prfm pldl1keep, [x22, #64]\n" - "fmla v11.4s, v24.4s, v6.4s\n" - "prfm pldl1keep, [x20, x16]\n" - "fmla v10.4s, v24.4s, v20.4s\n" - "ldr s24, [x22]\n" - "fmla v8.4s, v27.4s, v2.4s\n" - "prfm pldl1keep, [x9, x10]\n" - "fmla v16.4s, v27.4s, v19.4s\n" - "prfm pldl1keep, [x24, x21]\n" - "fmla v14.4s, v27.4s, v20.4s\n" - "ldr s26, [x20, %[input_col_stride1]]\n" - "fmla v12.4s, v22.4s, v1.4s\n" - "prfm pldl1keep, [%[inptr0], x7]\n" - "fmla v8.4s, v22.4s, v4.4s\n" - "prfm pldl1keep, [x23, #64]\n" - "fmla v11.4s, v22.4s, v2.4s\n" - "prfm pldl1keep, [x22, x16]\n" - "fmla v16.4s, v22.4s, v6.4s\n" - "prfm pldl1keep, [x20, x10]\n" - "fmla v7.4s, v22.4s, v19.4s\n" - "prfm pldl1keep, [x9, x21]\n" - "fmla v15.4s, v22.4s, v20.4s\n" - "ldr s30, [x9, x13]\n" - "fmla v12.4s, v21.4s, v3.4s\n" - "prfm pldl1keep, [x24, x7]\n" - "fmla v8.4s, v21.4s, v5.4s\n" - "prfm pldl1keep, [%[inptr0], x12]\n" - "fmla v11.4s, v21.4s, v4.4s\n" - "prfm pldl1keep, [x23, x16]\n" - "fmla v7.4s, v21.4s, v6.4s\n" - "prfm pldl1keep, [x22, x10]\n" - "fmla v10.4s, v21.4s, v19.4s\n" - "prfm pldl1keep, [x20, x21]\n" - "fmla v17.4s, v21.4s, v20.4s\n" - "ldr s22, [x24, x19]\n" - "fmla v11.4s, v25.4s, v5.4s\n" - "prfm pldl1keep, [x9, x7]\n" - "fmla v10.4s, v25.4s, v6.4s\n" - "prfm pldl1keep, [x24, x12]\n" - "fmla v9.4s, v25.4s, v20.4s\n" - "ldr s21, [%[inptr0], x17]\n" - "fmla v16.4s, v24.4s, 
v2.4s\n" - "prfm pldl1keep, [x23, x10]\n" - "fmla v14.4s, v24.4s, v19.4s\n" - "ldr s24, [x23]\n" - "fmla v8.4s, v26.4s, v1.4s\n" - "prfm pldl1keep, [x22, x21]\n" - "fmla v16.4s, v26.4s, v4.4s\n" - "prfm pldl1keep, [x20, x7]\n" - "fmla v7.4s, v26.4s, v2.4s\n" - "prfm pldl1keep, [x9, x12]\n" - "fmla v14.4s, v26.4s, v6.4s\n" - "prfm pldl1keep, [x23, x21]\n" - "fmla v15.4s, v26.4s, v19.4s\n" - "prfm pldl1keep, [x22, x7]\n" - "fmla v13.4s, v26.4s, v20.4s\n" - "ldr s26, [x22, %[input_col_stride1]]\n" - "fmla v12.4s, v30.4s, v0.4s\n" - "prfm pldl1keep, [x20, x12]\n" - "fmla v8.4s, v30.4s, v3.4s\n" - "prfm pldl1keep, [x23, x7]\n" - "fmla v11.4s, v30.4s, v1.4s\n" - "prfm pldl1keep, [x22, x12]\n" - "fmla v16.4s, v30.4s, v5.4s\n" - "prfm pldl1keep, [x23, x12]\n" - "fmla v7.4s, v30.4s, v4.4s\n" - "add %[wbptr], %[wbptr], #40\n" - "fmla v10.4s, v30.4s, v2.4s\n" - "prfm pldl1keep, [%[wbptr], #64]\n" - "fmla v15.4s, v30.4s, v6.4s\n" - "fmla v17.4s, v30.4s, v19.4s\n" - "fmla v18.4s, v30.4s, v20.4s\n" - "ldr s27, [x20, x13]\n" - "fmla v11.4s, v22.4s, v3.4s\n" - "fmla v7.4s, v22.4s, v5.4s\n" - "fmla v10.4s, v22.4s, v4.4s\n" - "fmla v17.4s, v22.4s, v6.4s\n" - "fmla v9.4s, v22.4s, v19.4s\n" - "fmla v14.4s, v24.4s, v2.4s\n" - "mov v25.16b, v23.16b\n" - "fmla v16.4s, v26.4s, v1.4s\n" - "fmla v10.4s, v21.4s, v5.4s\n" - "fmla v15.4s, v26.4s, v2.4s\n" - "fmla v25.4s, v22.4s, v20.4s\n" - "ldr s28, [x9, x19]\n" - "fmla v9.4s, v21.4s, v6.4s\n" - "ldr s29, [x24, x17]\n" - "fmla v14.4s, v26.4s, v4.4s\n" - "fmla v13.4s, v26.4s, v19.4s\n" - "mov v22.16b, v23.16b\n" - "fmla v8.4s, v27.4s, v0.4s\n" - "fmla v16.4s, v27.4s, v3.4s\n" - "fmla v7.4s, v27.4s, v1.4s\n" - "fmla v14.4s, v27.4s, v5.4s\n" - "fmla v15.4s, v27.4s, v4.4s\n" - "fmla v17.4s, v27.4s, v2.4s\n" - "fmla v13.4s, v27.4s, v6.4s\n" - "fmla v18.4s, v27.4s, v19.4s\n" - "fmla v22.4s, v27.4s, v20.4s\n" - "mov v24.16b, v23.16b\n" - "mov v21.16b, v23.16b\n" - "fmla v11.4s, v28.4s, v0.4s\n" - "fmla v7.4s, v28.4s, v3.4s\n" - "fmla v10.4s, v28.4s, 
v1.4s\n" - "fmla v15.4s, v28.4s, v5.4s\n" - "fmla v17.4s, v28.4s, v4.4s\n" - "fmla v9.4s, v28.4s, v2.4s\n" - "fmla v18.4s, v28.4s, v6.4s\n" - "fmla v25.4s, v28.4s, v19.4s\n" - "fmla v24.4s, v28.4s, v20.4s\n" - "ldr s23, [%[inptr0], x11]\n" - "fmla v10.4s, v29.4s, v3.4s\n" - "add %[inptr0], %[inptr0], #4\n" - "fmla v17.4s, v29.4s, v5.4s\n" - "fmla v9.4s, v29.4s, v4.4s\n" - "fmla v25.4s, v29.4s, v6.4s\n" - "ldr s30, [x23, %[input_col_stride1]]\n" - "fmla v14.4s, v30.4s, v1.4s\n" - "fmla v13.4s, v30.4s, v2.4s\n" - "fmla v9.4s, v23.4s, v5.4s\n" - "ldr s23, [x22, x13]\n" - "fmla v16.4s, v23.4s, v0.4s\n" - "ldr s29, [x20, x19]\n" - "fmla v14.4s, v23.4s, v3.4s\n" - "fmla v15.4s, v23.4s, v1.4s\n" - "fmla v13.4s, v23.4s, v4.4s\n" - "fmla v18.4s, v23.4s, v2.4s\n" - "fmla v22.4s, v23.4s, v19.4s\n" - "ldr s23, [x9, x17]\n" - "fmla v7.4s, v29.4s, v0.4s\n" - "fmla v15.4s, v29.4s, v3.4s\n" - "fmla v17.4s, v29.4s, v1.4s\n" - "fmla v13.4s, v29.4s, v5.4s\n" - "fmla v18.4s, v29.4s, v4.4s\n" - "fmla v25.4s, v29.4s, v2.4s\n" - "fmla v22.4s, v29.4s, v6.4s\n" - "fmla v24.4s, v29.4s, v19.4s\n" - "fmla v21.4s, v29.4s, v20.4s\n" - "ldr s26, [x24, x11]\n" - "fmla v10.4s, v23.4s, v0.4s\n" - "ldr s28, [x23, x13]\n" - "fmla v17.4s, v23.4s, v3.4s\n" - "add x24, x24, #4\n" - "fmla v9.4s, v23.4s, v1.4s\n" - "fmla v18.4s, v23.4s, v5.4s\n" - "fmla v25.4s, v23.4s, v4.4s\n" - "fmla v24.4s, v23.4s, v6.4s\n" - "fmla v14.4s, v28.4s, v0.4s\n" - "ldr s20, [x22, x19]\n" - "fmla v9.4s, v26.4s, v3.4s\n" - "fmla v13.4s, v28.4s, v1.4s\n" - "fmla v25.4s, v26.4s, v5.4s\n" - "ldr s26, [x20, x17]\n" - "fmla v22.4s, v28.4s, v2.4s\n" - "ldr s23, [x9, x11]\n" - "fmla v15.4s, v20.4s, v0.4s\n" - "add x9, x9, #4\n" - "fmla v13.4s, v20.4s, v3.4s\n" - "fmla v18.4s, v20.4s, v1.4s\n" - "fmla v22.4s, v20.4s, v4.4s\n" - "fmla v24.4s, v20.4s, v2.4s\n" - "fmla v21.4s, v20.4s, v19.4s\n" - "ldr s27, [x23, x19]\n" - "fmla v17.4s, v26.4s, v0.4s\n" - "ldr s20, [x22, x17]\n" - "fmla v18.4s, v26.4s, v3.4s\n" - "fmla v25.4s, v26.4s, 
v1.4s\n" - "fmla v22.4s, v26.4s, v5.4s\n" - "fmla v24.4s, v26.4s, v4.4s\n" - "fmla v21.4s, v26.4s, v6.4s\n" - "ldr s19, [x20, x11]\n" - "fmla v9.4s, v23.4s, v0.4s\n" - "ldr s28, [x23, x17]\n" - "fmla v25.4s, v23.4s, v3.4s\n" - "add x20, x20, #4\n" - "fmla v24.4s, v23.4s, v5.4s\n" - "ldr s29, [x22, x11]\n" - "fmla v13.4s, v27.4s, v0.4s\n" - "add x22, x22, #4\n" - "fmla v22.4s, v27.4s, v1.4s\n" - "fmla v21.4s, v27.4s, v2.4s\n" - "fmla v18.4s, v20.4s, v0.4s\n" - "ldr s30, [x23, x11]\n" - "fmla v24.4s, v20.4s, v1.4s\n" - "add x23, x23, #4\n" - "fmla v22.4s, v20.4s, v3.4s\n" - "fmla v21.4s, v20.4s, v4.4s\n" - "fmla v25.4s, v19.4s, v0.4s\n" - "movi v26.16b, #0\n" - "fmla v24.4s, v19.4s, v3.4s\n" - "fmov v27.4s, #6.0\n" - "fmla v21.4s, v19.4s, v5.4s\n" - "fmla v22.4s, v28.4s, v0.4s\n" - "fmax v12.4s, v12.4s, v26.4s\n" - "fmax v11.4s, v11.4s, v26.4s\n" - "fmla v24.4s, v29.4s, v0.4s\n" - "fmax v10.4s, v10.4s, v26.4s\n" - "fmla v21.4s, v28.4s, v1.4s\n" - "fmin v12.4s, v12.4s, v27.4s\n" - "fmin v11.4s, v11.4s, v27.4s\n" - "fmin v10.4s, v10.4s, v27.4s\n" - "str s12, [%[outptr0]]\n" - "fmax v9.4s, v9.4s, v26.4s\n" - "str s11, [%[outptr0], %[output_col_stride1]]\n" - "fmla v21.4s, v29.4s, v3.4s\n" - "str s10, [%[outptr0], x27]\n" - "fmin v9.4s, v9.4s, v27.4s\n" - "fmax v8.4s, v8.4s, v26.4s\n" - "fmax v7.4s, v7.4s, v26.4s\n" - "str s9, [%[outptr0], x28]\n" - "fmla v21.4s, v30.4s, v0.4s\n" - "fmin v8.4s, v8.4s, v27.4s\n" - "add %[outptr0], %[outptr0], #4\n" - "fmin v7.4s, v7.4s, v27.4s\n" - "fmax v17.4s, v17.4s, v26.4s\n" - "str s8, [x8]\n" - "fmax v25.4s, v25.4s, v26.4s\n" - "str s7, [x8, %[output_col_stride1]]\n" - "fmin v17.4s, v17.4s, v27.4s\n" - "fmin v25.4s, v25.4s, v27.4s\n" - "fmax v16.4s, v16.4s, v26.4s\n" - "str s17, [x8, x27]\n" - "fmax v15.4s, v15.4s, v26.4s\n" - "str s25, [x8, x28]\n" - "fmin v16.4s, v16.4s, v27.4s\n" - "fmin v15.4s, v15.4s, v27.4s\n" - "add x8, x8, #4\n" - "str s16, [x25]\n" - "fmax v18.4s, v18.4s, v26.4s\n" - "str s15, [x25, 
%[output_col_stride1]]\n" - "fmax v24.4s, v24.4s, v26.4s\n" - "fmin v18.4s, v18.4s, v27.4s\n" - "fmax v14.4s, v14.4s, v26.4s\n" - "fmin v24.4s, v24.4s, v27.4s\n" - "fmax v13.4s, v13.4s, v26.4s\n" - "str s18, [x25, x27]\n" - "fmin v14.4s, v14.4s, v27.4s\n" - "str s24, [x25, x28]\n" - "fmin v13.4s, v13.4s, v27.4s\n" - "str s14, [x26]\n" - "fmax v22.4s, v22.4s, v26.4s\n" - "str s13, [x26, %[output_col_stride1]]\n" - "fmax v21.4s, v21.4s, v26.4s\n" - "fmin v22.4s, v22.4s, v27.4s\n" - "add x25, x25, #4\n" - "fmin v21.4s, v21.4s, v27.4s\n" - "str s22, [x26, x27]\n" - "str s21, [x26, x28]\n" - "add x26, x26, #4\n" - "7:\n" - : [inptr0] "+r" (input), [outptr0] "+r" (output), [wbptr] "+r" (weight_bias_ptr) - : [output_row_stride] "r" (output_row_stride * sizeof(float)), [input_row_stride] "r" (input_row_stride * sizeof(float)), [n_channels] "r" ((long) n_channels), [output_col_stride1] "r" (output_col_stride * sizeof(float)), [input_col_stride1] "r" (input_col_stride * sizeof(float)) - : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v4", "v5", "v6", "v7", "v8", "v9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x7", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x8", "x9", "memory" - ); -} - -#endif // __aarch64__ - -template class DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.cpp deleted file mode 100644 index 27bfb843f6..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "impl_dilated.hpp" - -template class depthwise::DilatedDepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 1, 1, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<4, 4, 3, 3, 2, 2, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<4, 4, 5, 5, 1, 1, float, float, float>; -template class depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float, float, float>; - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template class depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>; -template class depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>; -template class depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 1, 1, float16_t, float16_t, float16_t>; -template class depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float16_t, float16_t, float16_t>; -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp deleted file mode 100644 index 1bae815613..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include <deque> -#include <functional> -#include <memory> - -#include "depthwise.hpp" - -namespace depthwise -{ - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename TIn, typename TBias, typename TOut -> -class DilatedDepthwiseConvolution : public IDepthwiseConvolution -{ - public: - /** Create a new dilated depthwise convolution engine. - */ - DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - /** Create a new dilated depthwise convolution engine. 
- */ - DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - // Cannot copy or move a DilatedDepthwiseConvolution. - DilatedDepthwiseConvolution(DilatedDepthwiseConvolution&) = delete; - DilatedDepthwiseConvolution operator=(DilatedDepthwiseConvolution&) = delete; - - /* Set input tensor and stride. */ - void set_input(const void *inptr) override; - void set_input(const void *inptr, int column_stride) override; - void set_input(const void *inptr, int row_stride, int column_stride) override; - void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) override; - - /* Set output tensor and stride. */ - void set_output(void *outptr) override; - void set_output(void *outptr, int column_stride) override; - void set_output(void *outptr, int row_stride, int column_stride) override; - void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) override; - - static int get_output_size( - int dim_size, - unsigned int padding_before, - unsigned int padding_after, - int dilation_factor - ); - - int output_size( - int dim_size, unsigned int padding_before, unsigned int padding_after - ) const override; - - /* Weights and biases are re-ordered to improve memory access patterns. Use - * these methods to determine the size of the re-pack buffer and to set the - * address (and implicitly reorder the weights and biases into) the buffer. 
- */ - size_t get_packed_params_size(void) const override; - void set_packed_params_buffer(void *) override; - - void pack_params(const void *weights, const void *biases=nullptr) const override; - void pack_params(void *buffer, const void *weights, const void *biases=nullptr) const override; - void pack_params( - void *buffer, - const void* weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases=nullptr - ) const override; - - /* Working space is used to pad tensors on the fly. Before running any - * inference check the amount of space required, allocate and provide a - * pointer to the convolution engine. - */ - size_t get_working_space_size(unsigned int nthreads=1) const override; - void set_working_space(void *) override; - - unsigned int get_window(void) const override; - void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override; - - protected: - /** Protected constructor which also accepts a function to construct a new - * subconvolution - */ - DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right, - std::function<IDepthwiseConvolution *(int, int, int, int, int, int, nck::ActivationFunction, unsigned int, unsigned int, unsigned int, unsigned int)> subconvfn - ); - - const int _dilation_factor; - const int _n_input_rows, _n_input_cols, _n_channels; - const int _padding_top, _padding_left; - const int _n_output_rows, _n_output_cols; - - /* Dilated depthwise convolution is performed through repeated calls to - * non-dilated convolutions. If the dilation factor is $n$, then we perform - * $(n + 1)^2$ depthwise convolutions. 
- */ - using BaseDepthwise = DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - TIn, TBias, TOut - >; - std::deque<std::deque<std::unique_ptr<IDepthwiseConvolution>>> _convs; -}; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated_qa8_qa8.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated_qa8_qa8.cpp deleted file mode 100644 index e56583d6b3..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated_qa8_qa8.cpp +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "depthwise_quantized_dilated.hpp" -#include "impl_dilated.hpp" - -namespace depthwise { - -template <unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols> -QAsymm8DilatedDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, - KernelCols, StrideRows, StrideCols>:: - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right) - : QAsymm8DilatedDepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor, - QAsymm8DilatedDepthwiseConvolution::get_output_size( - n_input_rows, padding_top, padding_bottom, dilation_factor), - QAsymm8DilatedDepthwiseConvolution::get_output_size( - n_input_cols, padding_left, padding_right, dilation_factor), - activation, weight_quantisation, input_quantisation, - output_quantisation, padding_top, padding_left, padding_bottom, - padding_right) {} - -template <unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols> -QAsymm8DilatedDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, - KernelCols, StrideRows, StrideCols>:: - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - unsigned int 
padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right) - : QAsymm8DilatedDepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor, - n_output_rows, n_output_cols, activation, weight_quantisation, - input_quantisation, output_quantisation, - qasymm8::QAsymm8RescaleParams::make_rescale_params( - weight_quantisation, input_quantisation, output_quantisation), - padding_top, padding_left, padding_bottom, padding_right) {} - -template <unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols> -QAsymm8DilatedDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, - KernelCols, StrideRows, StrideCols>:: - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - const qasymm8::QAsymm8RescaleParams &rescale_parameters, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right) - : QAsymm8DilatedDepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor, - QAsymm8DilatedDepthwiseConvolution::get_output_size( - n_input_rows, padding_top, padding_bottom, dilation_factor), - QAsymm8DilatedDepthwiseConvolution::get_output_size( - n_input_cols, padding_left, padding_right, dilation_factor), - activation, weight_quantisation, input_quantisation, - output_quantisation, rescale_parameters, padding_top, padding_left, - padding_bottom, padding_right) {} - -template <unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols> 
-QAsymm8DilatedDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, - KernelCols, StrideRows, StrideCols>:: - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - const qasymm8::QAsymm8RescaleParams &rescale_parameters, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right) - : DilatedDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, - KernelCols, StrideRows, StrideCols, uint8_t, - int32_t, uint8_t>( - n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor, - n_output_rows, n_output_cols, activation, padding_top, padding_left, - padding_bottom, padding_right, - [weight_quantisation, input_quantisation, output_quantisation, - rescale_parameters]( - const int n_batches, const int n_input_rows, - const int n_input_cols, const int n_channels, - const int n_output_rows, const int n_output_cols, - const nck::ActivationFunction activation, - const unsigned int padding_top, const unsigned int padding_left, - const unsigned int padding_bottom, - const unsigned int padding_right) -> IDepthwiseConvolution * { - return new QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, - StrideRows, StrideCols>( - n_batches, n_input_rows, n_input_cols, n_channels, - n_output_rows, n_output_cols, activation, weight_quantisation, - input_quantisation, output_quantisation, rescale_parameters, - padding_top, padding_left, padding_bottom, padding_right); - }) {} - -} // namespace depthwise - -template class depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 1, 1>; -template class depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 2, 2>; 
-template class depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 1, 1>; -template class depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 2, 2>; diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_pack_parameters.cpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_pack_parameters.cpp deleted file mode 100644 index bddae51135..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_pack_parameters.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "impl_base.hpp" - -// TODO Move to common utilities somewhere -template <size_t Size> struct DType { }; -template <> struct DType<1> { using scalar_type = uint8_t; }; -template <> struct DType<2> { using scalar_type = uint16_t; }; -template <> struct DType<4> { using scalar_type = uint32_t; }; - -namespace depthwise -{ - -template <unsigned int KernelRows, unsigned int KernelColumns, size_t WeightSize, size_t BiasSize> -void PackParameters<KernelRows, KernelColumns, WeightSize, BiasSize>::execute( - unsigned int n_channels, - void *buffer, - const void *weights, - const unsigned int weight_row_stride, - const unsigned int weight_col_stride, - const void *biases -) -{ - using TWeight = typename DType<WeightSize>::scalar_type; - using TBias = typename DType<BiasSize>::scalar_type; - - auto buffer_ptr = static_cast<uint8_t *>(buffer); - auto weights_ptr = static_cast<const TWeight *>(weights); - auto biases_ptr = static_cast<const TBias *>(biases); - - const unsigned int veclen = 16 / WeightSize; - for (; n_channels >= veclen; n_channels -= veclen) - { - // Copy biases - for (unsigned int i = 0; i < veclen; i++) - { - auto ptr = reinterpret_cast<TBias *>(buffer_ptr); - *ptr = (biases_ptr == nullptr) ? 0x0 : *(biases_ptr++); - buffer_ptr += BiasSize; - } - - // Copy weights - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelColumns; j++) - { - for (unsigned int c = 0; c < veclen; c++) - { - *(reinterpret_cast<TWeight *>(buffer_ptr)) = weights_ptr[i*weight_row_stride + j*weight_col_stride + c]; - buffer_ptr += WeightSize; - } - } - } - weights_ptr += veclen; - } - for (; n_channels; n_channels--) - { - // Copy bias - auto ptr = reinterpret_cast<TBias *>(buffer_ptr); - *ptr = (biases_ptr == nullptr) ? 
0x0 : *(biases_ptr++); - buffer_ptr += BiasSize; - - // Copy weights - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelColumns; j++) - { - *(reinterpret_cast<TWeight *>(buffer_ptr)) = weights_ptr[i*weight_row_stride + j*weight_col_stride]; - buffer_ptr += WeightSize; - } - } - weights_ptr++; - } -} - -template struct PackParameters<3, 3, 2ul, 2ul>; -template struct PackParameters<3, 3, 4ul, 4ul>; -template struct PackParameters<5, 5, 2ul, 2ul>; -template struct PackParameters<5, 5, 4ul, 4ul>; -} // namespace diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp deleted file mode 100644 index 4343f6ad45..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once -#include "depthwise.hpp" -#include "qasymm8.hpp" -#include "qsymm8.hpp" -#pragma once - -using namespace neon_convolution_kernels; -using namespace qasymm8; - -inline int32x4_t saturating_doubling_high_mul(const int32x4_t& a, const int32x4_t& b) -{ - return vqrdmulhq_s32(a, b); -} - -inline int32x4_t saturating_doubling_high_mul(const int32x4_t& a, const int32_t& b) -{ - return vqrdmulhq_n_s32(a, b); -} - -inline int32_t saturating_doubling_high_mul(const int32_t& a, const int32_t& b) -{ - return vget_lane_s32(vqrdmulh_n_s32(vdup_n_s32(a), b), 0); -} - -inline int32x4_t rounding_divide_by_exp2(const int32x4_t& x, const int32x4_t shift) -{ - const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift), 31); - const int32x4_t fixed = vqaddq_s32(x, fixup); - return vrshlq_s32(fixed, shift); -} - -inline int32x4_t rounding_divide_by_exp2(const int32x4_t& x, const int exponent) -{ - const int32x4_t shift = vdupq_n_s32(-exponent); - const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift), 31); - const int32x4_t fixed = vqaddq_s32(x, fixup); - return vrshlq_s32(fixed, shift); -} - -inline int32x2_t rounding_divide_by_exp2(const int32x2_t& x, const int exponent) -{ - const int32x2_t shift = vdup_n_s32(-exponent); - const int32x2_t fixup = vshr_n_s32(vand_s32(x, shift), 31); - const int32x2_t fixed = vqadd_s32(x, fixup); - return vrshl_s32(fixed, shift); -} - -inline int32_t rounding_divide_by_exp2(const int32_t& x, const int exponent) -{ - const int32x2_t xs = vdup_n_s32(x); - return vget_lane_s32(rounding_divide_by_exp2(xs, exponent), 0); -} - -namespace depthwise -{ - -namespace nck = neon_convolution_kernels; - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - 
unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -class QAsymm8DepthwiseConvolution : public DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - uint8_t, int32_t, uint8_t, - QAsymm8DepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols> -> -{ - using Base = DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - uint8_t, int32_t, uint8_t, - QAsymm8DepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols> - >; - friend Base; - using InputType = typename Base::InputType; - using OutputType = typename Base::OutputType; - - public: - QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - const 
qasymm8::QAsymm8RescaleParams& rescale_parameters, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - const qasymm8::QAsymm8RescaleParams& rescale_parameters, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - protected: - uint8_t _input_padding_value(void) const; - - void _pack_params( - void *buffer, - const void *weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases=nullptr - ) const; - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const uint8_t* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - uint8_t* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride - ); - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const uint8_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - uint8_t* outptrs[Base::output_tile_rows][Base::output_tile_cols] - ); - - private: - // Quantization parameters - const qasymm8::QAsymm8Params _weights_quant, _inputs_quant, _output_quant; - const qasymm8::QAsymm8RescaleParams rescale_parameters; -}; - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -class QSymm8HybridPerChannelDepthwiseConvolution : public DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, 
StrideCols, - uint8_t, int32_t, uint8_t, - QSymm8HybridPerChannelDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols> -> -{ - using Base = DepthwiseConvolutionBase< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, - StrideRows, StrideCols, - uint8_t, int32_t, uint8_t, - QSymm8HybridPerChannelDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols> - >; - friend Base; - using InputType = typename Base::InputType; - using OutputType = typename Base::OutputType; - - public: - QSymm8HybridPerChannelDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - const qsymm8::QSymm8PerChannelParams& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - QSymm8HybridPerChannelDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - nck::ActivationFunction activation, - const qsymm8::QSymm8PerChannelParams& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - const qsymm8::QSymm8PerChannelRescaleParams& rescale_parameters, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right - ); - - size_t get_packed_params_size(void) const override - { - return this->n_channels() * (sizeof(int8_t)*KernelRows*KernelCols + 3*sizeof(int32_t)); - - } - - protected: - uint8_t _input_padding_value(void) const; - - void _pack_params( - void *buffer, - const void *weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases=nullptr - ) const; - - template <nck::ActivationFunction Activation> - void execute_tile( - int 
n_channels, - const void* packed_params, - const uint8_t* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - uint8_t* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride - ); - - template <nck::ActivationFunction Activation> - void execute_tile( - int n_channels, - const void* packed_params, - const uint8_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - uint8_t* outptrs[Base::output_tile_rows][Base::output_tile_cols] - ); - - private: - // Quantization parameters - const qsymm8::QSymm8PerChannelParams _weights_quant; - const qasymm8::QAsymm8Params _input_quant, _output_quant; - const qsymm8::QSymm8PerChannelRescaleParams _rescale_parameters; -}; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp deleted file mode 100644 index a11b0981c9..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once -#include "depthwise_dilated.hpp" -#include "depthwise_quantized.hpp" - -namespace depthwise { - -template <unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols> -class QAsymm8DilatedDepthwiseConvolution - : public DilatedDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, - StrideCols, uint8_t, int32_t, uint8_t> { -public: - /** Create a new dilated depthwise convolution engine. - */ - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right); - - /** Create a new dilated depthwise convolution engine. - */ - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right); - - /** Create a new dilated depthwise convolution engine. 
- */ - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - const qasymm8::QAsymm8RescaleParams &rescale_parameters, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right); - - /** Create a new dilated depthwise convolution engine. - */ - QAsymm8DilatedDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int dilation_factor, int n_output_rows, int n_output_cols, - nck::ActivationFunction activation, - const qasymm8::QAsymm8Params &weight_quantisation, - const qasymm8::QAsymm8Params &input_quantisation, - const qasymm8::QAsymm8Params &output_quantisation, - const qasymm8::QAsymm8RescaleParams& rescale_parameters, - unsigned int padding_top, unsigned int padding_left, - unsigned int padding_bottom, unsigned int padding_right); -}; - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_base.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_base.hpp deleted file mode 100644 index 266d13d6fc..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/impl_base.hpp +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - * - * NOTE: Header to be included by implementation files only. - * - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- */ - -#include <algorithm> -#include <cstdint> -#include "depthwise.hpp" -#include "padding.hpp" -#include "utils.hpp" - -#pragma once - -#define MEMBERFN(TOUT) template <\ - unsigned int OutputTileRows, unsigned int OutputTileColumns,\ - unsigned int KernelRows, unsigned int KernelColumns,\ - unsigned int StrideRows, unsigned int StrideColumns,\ - typename TIn, typename TBias, typename TOut,\ - typename Derived\ -> TOUT DepthwiseConvolutionBase<\ - OutputTileRows, OutputTileColumns,\ - KernelRows, KernelColumns,\ - StrideRows, StrideColumns,\ - TIn, TBias, TOut, Derived\ -> - -using namespace neon_convolution_kernels; - -namespace depthwise -{ - -template <unsigned int KernelRows, unsigned int KernelColumns, size_t WeightSize, size_t BiasSize> -struct PackParameters -{ - static void execute( - unsigned int n_channels, - void *buffer, - const void *weights, - unsigned int weight_row_stride, - unsigned int weight_col_stride, - const void *biases - ); -}; - -const unsigned int CHANNEL_BLOCK = 16; - -MEMBERFN(int)::get_output_size( - const int dim_size, const unsigned int padding_before, const unsigned int padding_after -) -{ - return iceildiv(dim_size + padding_before + padding_after - KernelRows + 1, StrideRows); -} - -MEMBERFN(int)::output_size( - const int dim_size, const unsigned int padding_before, const unsigned int padding_after -) const -{ - return get_output_size(dim_size, padding_before, padding_after); -} - -MEMBERFN()::DepthwiseConvolutionBase( - const int n_batches, - const int n_input_rows, - const int n_input_cols, - const int n_channels, - ActivationFunction activation, - const unsigned int padding_top, - const unsigned int padding_left, - const unsigned int padding_bottom, - const unsigned int padding_right -) : DepthwiseConvolutionBase( - n_batches, n_input_rows, n_input_cols, n_channels, - get_output_size(n_input_rows, padding_top, padding_bottom), - get_output_size(n_input_cols, padding_left, padding_right), - activation, - padding_top, 
padding_left, padding_bottom, padding_right - ) -{ -} - -MEMBERFN()::DepthwiseConvolutionBase( - const int n_batches, - const int n_input_rows, - const int n_input_cols, - const int n_channels, - const int n_output_rows, - const int n_output_cols, - ActivationFunction activation, - const unsigned int padding_top, - const unsigned int padding_left, - const unsigned int padding_bottom, - const unsigned int padding_right -) : _input(nullptr), _output(nullptr), - _packed_parameters(nullptr), - _working_space(nullptr), - _n_batches(n_batches), - _n_input_rows(n_input_rows), - _n_input_cols(n_input_cols), - _n_channels(n_channels), - _n_output_rows(n_output_rows), - _n_output_cols(n_output_cols), - _n_tile_rows(iceildiv(_n_output_rows, output_tile_rows)), - _n_tile_cols(iceildiv(_n_output_cols, output_tile_cols)), - _padding_top(padding_top), - _padding_left(padding_left), - _padding_bottom(padding_bottom), - _padding_right(padding_right), - _activation(activation), - _input_col_stride(0), _input_row_stride(0), _input_batch_stride(0), - _output_col_stride(0), _output_row_stride(0), _output_batch_stride(0) -{ -} - -MEMBERFN(void)::set_input(const void* const inptr) -{ - set_input(inptr, _n_channels); -} - -MEMBERFN(void)::set_input(const void* const inptr, const int ld_col) -{ - set_input(inptr, _n_input_cols * ld_col, ld_col); -} - -MEMBERFN(void)::set_input(const void* const inptr, const int ld_row, const int ld_col) -{ - set_input(inptr, _n_input_rows * ld_row, ld_row, ld_col); -} - -MEMBERFN(void)::set_input(const void* const inptr, const int ld_batch, const int ld_row, const int ld_col) -{ - _input = static_cast<const TIn *>(inptr); - _input_batch_stride = ld_batch; - _input_row_stride = ld_row; - _input_col_stride = ld_col; -} - -MEMBERFN(void)::set_output(void* const outptr) -{ - set_output(outptr, _n_channels); -} - -MEMBERFN(void)::set_output(void* const outptr, const int ld_col) -{ - set_output(outptr, _n_output_cols * ld_col, ld_col); -} - 
-MEMBERFN(void)::set_output(void* const outptr, const int ld_row, const int ld_col) -{ - set_output(outptr, _n_output_rows * ld_row, ld_row, ld_col); -} - -MEMBERFN(void)::set_output(void* const outptr, const int ld_batch, const int ld_row, const int ld_col) -{ - _output = static_cast<TOut *>(outptr); - _output_batch_stride = ld_batch; - _output_row_stride = ld_row; - _output_col_stride = ld_col; -} - -MEMBERFN(size_t)::get_packed_params_size(void) const -{ - return _n_channels * (sizeof(TIn)*KernelRows*KernelColumns + sizeof(TBias)); -} - -MEMBERFN(void)::set_packed_params_buffer(void *buffer) -{ - _packed_parameters = buffer; -} - -MEMBERFN(void)::pack_params(const void *weights, const void *biases) const -{ - static_cast<const Derived *>(this)->pack_params(_packed_parameters, weights, biases); -} - -MEMBERFN(void)::pack_params(void *buffer, const void *weights, const void *biases) const -{ - const unsigned int weight_col_stride = _n_channels; - const unsigned int weight_row_stride = KernelColumns * weight_col_stride; - static_cast<const Derived *>(this)->pack_params( - buffer, weights, weight_row_stride, weight_col_stride, biases - ); -} - -MEMBERFN(void)::pack_params( - void * const buffer, - const void * const weights, - const unsigned int weight_row_stride, - const unsigned int weight_col_stride, - const void * const biases -) const -{ - static_cast<const Derived *>(this)->_pack_params( - buffer, weights, weight_row_stride, weight_col_stride, biases - ); -} - -MEMBERFN(void)::_pack_params( - void * const buffer, - const void * const weights, - const unsigned int weight_row_stride, - const unsigned int weight_col_stride, - const void * const biases -) const -{ - // Default implementation - PackParameters<KernelRows, KernelColumns, sizeof(TIn), sizeof(TOut)>::execute( - _n_channels, buffer, weights, weight_row_stride, weight_col_stride, biases - ); -} - -MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const -{ - return nthreads * ( - 
_get_input_working_space_size() + _get_output_working_space_size() - ); -} - -MEMBERFN(void)::set_working_space(void *buffer) -{ - _working_space = buffer; -} - -MEMBERFN(size_t)::_get_input_working_space_size(void) const -{ - return sizeof(TIn) * _n_channels; -} - -MEMBERFN(size_t)::_get_output_working_space_size(void) const -{ - return sizeof(TOut) * _n_channels; -} - -MEMBERFN(void *)::_get_input_working_space(const unsigned int threadid) const -{ - return static_cast<uint8_t*>(_working_space) + threadid * ( - _get_input_working_space_size() + _get_output_working_space_size() - ); -} - -MEMBERFN(void *)::_get_output_working_space(const unsigned int threadid) const -{ - return static_cast<uint8_t*>(_get_input_working_space(threadid)) + _get_input_working_space_size(); -} - -MEMBERFN(unsigned int)::get_window() const -{ - // Parallelise over blocks of channels. - return iceildiv(_n_channels, CHANNEL_BLOCK); -} - -MEMBERFN(void)::run( - const unsigned int start, - const unsigned int stop, - const unsigned int threadid -) -{ - // Clear the input padding buffer - TIn *buf = static_cast<TIn *>(_get_input_working_space(threadid)); - const TIn pad_value = static_cast<Derived *>(this)->_input_padding_value(); - for (int n = 0; n < _n_channels; n++) - { - buf[n] = pad_value; - } - - // Parallelise over blocks of channels - const auto start_channel = CHANNEL_BLOCK * start; - const auto stop_channel = std::min<unsigned int>(_n_channels, CHANNEL_BLOCK * stop); - const auto params_size_per_channel = this->get_packed_params_size()/_n_channels; - - // Compute top and bottom padding for input and output - const int input_pad_top = _padding_top; - const int input_pad_left = _padding_left; - constexpr int tile_overlap = kernel_rows - stride_rows; - - // Perform the convolution by calling `process_tile_row` for each tile row in - // each batch. 
- for (int batch = 0; batch < _n_batches; batch++) - { - const TIn* const inptr_batch = _input + batch*_input_batch_stride; - TOut* const outptr_batch = _output + batch*_output_batch_stride; - - // Loop over rows of tiles - for (int tile_i = 0; tile_i < _n_tile_rows; tile_i++) - { - // Pointer to the row - const int input_row_offset = (tile_i == 0) ? 0 : input_pad_top; - const TIn* const inptr_row = (inptr_batch + ((inner_tile_rows - tile_overlap)*tile_i - input_row_offset)*_input_row_stride); - TOut* const outptr_row = outptr_batch + output_tile_rows * tile_i * _output_row_stride; - - // Input padding (top + bottom) for the row - const int input_row_top = tile_i*(inner_tile_rows - tile_overlap) - input_pad_top; - const int input_row_bottom = input_row_top + inner_tile_rows; - const int input_row_pad_top = (tile_i == 0) ? input_pad_top : 0; - const int input_row_pad_bottom = std::max(0, input_row_bottom - _n_input_rows); - - // Output padding (bottom) for the row - const int output_row_bottom = (tile_i + 1)*output_tile_rows; - const int output_row_pad_bottom = std::max(0, output_row_bottom - _n_output_rows); - - // Get the offset into the packed parameters - const auto params_ptr = static_cast<const uint8_t*>(_packed_parameters) + - start_channel*params_size_per_channel; - - // Process the row - process_tile_row( - threadid, - stop_channel - start_channel, - params_ptr, - inptr_row + start_channel, - outptr_row + start_channel, - input_row_pad_top, input_pad_left, input_row_pad_bottom, - output_row_pad_bottom, - _n_tile_cols, _n_input_cols, _n_output_cols - ); - } - } -} - -MEMBERFN(void)::process_tile_row( - const unsigned int threadid, - const int n_channels, - const void* const packed_params, - const TIn* const inptr, - TOut* const outptr, - const int row_pad_in_top, - const int row_pad_in_left, - const int row_pad_in_bottom, - const int row_pad_out_bottom, - const int n_tiles, - const int n_input_cols, - const int n_output_cols -) -{ - constexpr int 
tile_overlap = kernel_cols - stride_cols; - - // Loop over columns of tiles - for (int tile_j = 0; tile_j < n_tiles; tile_j++) - { - // Input padding (left + right) for the tile - const int t_pad_in_left = (tile_j == 0) ? row_pad_in_left : 0; - const int t_in_start = tile_j*(inner_tile_cols - tile_overlap) - row_pad_in_left; - const int t_in_end = t_in_start + inner_tile_cols; - const int t_pad_in_right = std::max(0, t_in_end - n_input_cols); - - // Output padding (right) for the tile - const int t_out_end = (tile_j + 1) * output_tile_cols; - const int t_pad_out_right = std::max(0, t_out_end - n_output_cols); - - // Get pointers into the inputs and outputs - const int col_offset = (tile_j == 0) ? 0 : row_pad_in_left; - const TIn* const inptr_col = (inptr + ((inner_tile_cols - tile_overlap)*tile_j - col_offset)*_input_col_stride); - TOut* const outptr_col = outptr + tile_j * output_tile_cols * _output_col_stride; - - // Process just this tile - process_tile( - threadid, n_channels, packed_params, inptr_col, outptr_col, - row_pad_in_top, t_pad_in_left, row_pad_in_bottom, t_pad_in_right, // Input paddings - row_pad_out_bottom, t_pad_out_right // Output paddings - ); - } -} - -MEMBERFN(TIn)::_input_padding_value(void) const -{ - return static_cast<TIn>(0); -} - -MEMBERFN(void)::process_tile( - const unsigned int threadid, - const int n_channels, - const void* const packed_params, - const TIn* const inptr, - TOut* const outptr, - const int pad_in_top, - const int pad_in_left, - const int pad_in_bottom, - const int pad_in_right, - const int pad_out_bottom, - const int pad_out_right -) -{ - Derived * dthis = static_cast<Derived *>(this); - const bool pad_input = pad_in_top || pad_in_left || pad_in_bottom || pad_in_right; - const bool pad_output = pad_out_bottom || pad_out_right; - - if (!pad_input && !pad_output) - { - switch(_activation) - { - case ActivationFunction::ReLU: - dthis->template execute_tile<ActivationFunction::ReLU>( - n_channels, packed_params, - inptr, 
_input_row_stride, _input_col_stride, - outptr, _output_row_stride, _output_col_stride - ); - break; - case ActivationFunction::ReLU6: - dthis->template execute_tile<ActivationFunction::ReLU6>( - n_channels, packed_params, - inptr, _input_row_stride, _input_col_stride, - outptr, _output_row_stride, _output_col_stride - ); - break; - default: - dthis->template execute_tile<ActivationFunction::None>( - n_channels, packed_params, - inptr, _input_row_stride, _input_col_stride, - outptr, _output_row_stride, _output_col_stride - ); - break; - } - } - else - { - // Create arrays of input and output pointers, pointing padded elements to - // the working space padding buffers provided. - const TIn *inptrs[inner_tile_rows][inner_tile_cols]; - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - if (i < pad_in_top || (inner_tile_rows - pad_in_bottom) <= i || - j < pad_in_left || (inner_tile_cols - pad_in_right) <= j) - { - // Padded input - inptrs[i][j] = static_cast<const TIn *>(_get_input_working_space(threadid)); - } - else - { - inptrs[i][j] = inptr + (i - pad_in_top)*_input_row_stride + (j - pad_in_left)*_input_col_stride; - } - } - } - - TOut *outptrs[output_tile_rows][output_tile_cols]; - for (int i = 0; i < output_tile_rows; i++) - { - for (int j = 0; j < output_tile_cols; j++) - { - if (i < (output_tile_rows - pad_out_bottom) && - j < (output_tile_cols - pad_out_right)) - { - outptrs[i][j] = outptr + i*_output_row_stride + j*_output_col_stride; - } - else - { - outptrs[i][j] = static_cast<TOut *>(_get_output_working_space(threadid)); - } - } - } - - switch(_activation) - { - case ActivationFunction::ReLU: - dthis->template execute_tile<ActivationFunction::ReLU>( - n_channels, packed_params, inptrs, outptrs - ); - break; - case ActivationFunction::ReLU6: - dthis->template execute_tile<ActivationFunction::ReLU6>( - n_channels, packed_params, inptrs, outptrs - ); - break; - default: - dthis->template 
execute_tile<ActivationFunction::None>( - n_channels, packed_params, inptrs, outptrs - ); - break; - } - } -} - -MEMBERFN(int)::n_channels(void) const -{ - return _n_channels; -} - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp deleted file mode 100644 index 4130188187..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "depthwise_dilated.hpp" -#include "utils.hpp" - -#define MEMBERFN(TOUT) \ - template <unsigned int OutputTileRows, unsigned int OutputTileColumns, \ - unsigned int KernelRows, unsigned int KernelColumns, \ - unsigned int StrideRows, unsigned int StrideColumns, typename TIn, \ - typename TBias, typename TOut> \ - TOUT DilatedDepthwiseConvolution<OutputTileRows, OutputTileColumns, \ - KernelRows, KernelColumns, StrideRows, \ - StrideColumns, TIn, TBias, TOut> - -namespace depthwise { - -MEMBERFN() -::DilatedDepthwiseConvolution(const int n_batches, const int n_input_rows, - const int n_input_cols, const int n_channels, - const int dilation_factor, - nck::ActivationFunction activation, - const unsigned int padding_top, - const unsigned int padding_left, - const unsigned int padding_bottom, - const unsigned int padding_right) - : DilatedDepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor, - DilatedDepthwiseConvolution::get_output_size( - n_input_rows, padding_top, padding_bottom, dilation_factor), - DilatedDepthwiseConvolution::get_output_size( - n_input_cols, padding_left, padding_right, dilation_factor), - activation, padding_top, padding_left, padding_bottom, - padding_right) {} - -MEMBERFN() -::DilatedDepthwiseConvolution(const int n_batches, const int n_input_rows, - const int n_input_cols, const int n_channels, - const int dilation_factor, - const int n_output_rows, const int n_output_cols, - nck::ActivationFunction activation, - const unsigned int padding_top, - const unsigned int padding_left, - const unsigned int, // padding_bottom - const unsigned int // padding_right - ) - : DilatedDepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor, - n_output_rows, n_output_cols, activation, padding_top, padding_left, - 0, 0, - // Function which creates a new (standard) depthwise convolution - [](const int n_batches, const int n_input_rows, - const int n_input_cols, const int 
n_channels, - const int n_output_rows, const int n_output_cols, - const nck::ActivationFunction activation, - const unsigned int padding_top, const unsigned int padding_left, - const unsigned int padding_bottom, - const unsigned int padding_right) -> IDepthwiseConvolution * { - return new DepthwiseConvolution< - OutputTileRows, OutputTileColumns, KernelRows, KernelColumns, - StrideRows, StrideColumns, TIn, TBias, TOut>( - n_batches, n_input_rows, n_input_cols, n_channels, - n_output_rows, n_output_cols, activation, padding_top, - padding_left, padding_bottom, padding_right); - }) {} - -MEMBERFN() -::DilatedDepthwiseConvolution( - const int n_batches, const int n_input_rows, const int n_input_cols, - const int n_channels, const int dilation_factor, const int n_output_rows, - const int n_output_cols, nck::ActivationFunction activation, - const unsigned int padding_top, const unsigned int padding_left, - const unsigned int, // padding_bottom - const unsigned int, // padding_right - std::function<IDepthwiseConvolution *( - int, int, int, int, int, int, nck::ActivationFunction, unsigned int, - unsigned int, unsigned int, unsigned int)> - subconvfn // Function to create a new convolution - ) - : _dilation_factor(dilation_factor), _n_input_rows(n_input_rows), - _n_input_cols(n_input_cols), _n_channels(n_channels), - _padding_top(static_cast<int>(padding_top)), - _padding_left(static_cast<int>(padding_left)), - _n_output_rows(n_output_rows), _n_output_cols(n_output_cols), - _convs(_dilation_factor) { - // Instantiate the base convolutions - for (uint32_t i = 0; i < static_cast<uint32_t>(_dilation_factor); i++) { - // Compute properties of this row of base convolutions - const int row_top = - i * StrideRows - _padding_top; // -ve values are in the padding - const int row_pad_top = - row_top < 0 ? 
iceildiv(-row_top, dilation_factor) : 0; - - const int _n_input_rows = iceildiv(n_input_rows - i, dilation_factor); - const int _n_output_rows = iceildiv(n_output_rows - i, dilation_factor); - - for (uint32_t j = 0; j < static_cast<uint32_t>(_dilation_factor); j++) { - // Compute properties of the base convolution - const int col_left = - j * StrideColumns - padding_left; // -ve values are in the padding - const int col_pad_left = - col_left < 0 ? iceildiv(-col_left, dilation_factor) : 0; - - const int _n_input_cols = iceildiv(n_input_cols - j, dilation_factor); - const int _n_output_cols = iceildiv(n_output_cols - j, dilation_factor); - - // Create new depthwise convolution engine and include it in the vector - // of engines. The new depthwise convolution engine is created by calling - // the delegate function we received as an argument. - _convs[i].emplace_back(subconvfn( - n_batches, _n_input_rows, _n_input_cols, n_channels, _n_output_rows, - _n_output_cols, activation, - // Note: since we have computed the output tensor size we don't need - // to explicitly provide bottom and right padding values to the - // depthwise convolution. 
- row_pad_top, col_pad_left, 0, 0)); - } - } -} - -MEMBERFN(void)::set_input(const void *const inptr) { - set_input(inptr, _n_channels); -} - -MEMBERFN(void)::set_input(const void *const inptr, const int ldcol) { - set_input(inptr, _n_input_cols * ldcol, ldcol); -} - -MEMBERFN(void) -::set_input(const void *const inptr, const int ldrow, const int ldcol) { - set_input(inptr, _n_input_rows * ldrow, ldrow, ldcol); -} - -MEMBERFN(void) -::set_input(const void *const inptr, const int ldbatch, const int ldrow, - const int ldcol) { - // Compute dilated strides - const int ldrow_dilated = ldrow * _dilation_factor; - const int ldcol_dilated = ldcol * _dilation_factor; - - // Pass input parameters on to base convolutions - for (uint32_t i = 0; i < static_cast<uint32_t>(_dilation_factor); i++) { - const int top_pos = - i * StrideRows - _padding_top + - ((static_cast<int>(i * StrideRows) < _padding_top) - ? iceildiv(_padding_top - i * StrideRows, _dilation_factor) * - _dilation_factor - : 0); - const TIn *const inptr_i = - static_cast<const TIn *>(inptr) + top_pos * ldrow; - - for (uint32_t j = 0; j < static_cast<uint32_t>(_dilation_factor); j++) { - int left_pos = j * StrideColumns - _padding_left; - while (left_pos < 0) - left_pos += _dilation_factor; - - // Modify the pointer to point to the first element of the dilated input - // tensor, then set the input for this convolution engine. 
- const void *const inptr_ij = inptr_i + left_pos * ldcol; - _convs[i][j]->set_input(inptr_ij, ldbatch, ldrow_dilated, ldcol_dilated); - } - } -} - -MEMBERFN(void)::set_output(void *const outptr) { - set_output(outptr, _n_channels); -} - -MEMBERFN(void)::set_output(void *const outptr, const int ldcol) { - set_output(outptr, _n_output_cols * ldcol, ldcol); -} - -MEMBERFN(void) -::set_output(void *const outptr, const int ldrow, const int ldcol) { - set_output(outptr, _n_output_rows * ldrow, ldrow, ldcol); -} - -MEMBERFN(void) -::set_output(void *const outptr, const int ldbatch, const int ldrow, - const int ldcol) { - // Compute dilated strides - const int ldrow_dilated = ldrow * _dilation_factor; - const int ldcol_dilated = ldcol * _dilation_factor; - - // Pass input parameters on to base convolutions - for (uint32_t i = 0; i < static_cast<uint32_t>(_dilation_factor); i++) { - for (uint32_t j = 0; j < static_cast<uint32_t>(_dilation_factor); j++) { - // Modify the pointer to point to the first element of the dilated input - // tensor, then set the input for this convolution engine. 
- void *const outptr_ij = - static_cast<TOut *>(outptr) + i * ldrow + j * ldcol; - _convs[i][j]->set_output(outptr_ij, ldbatch, ldrow_dilated, - ldcol_dilated); - } - } -} - -MEMBERFN(int) -::get_output_size(const int dim_size, const unsigned int padding_before, - const unsigned int padding_after, const int dilation_factor) { - const int input_size = - dim_size + static_cast<int>(padding_before + padding_after); - const int window_size = (KernelRows - 1) * dilation_factor + 1; - return iceildiv(input_size - window_size + 1, StrideRows); -} - -MEMBERFN(int) -::output_size(const int dim_size, const unsigned int padding_before, - const unsigned int padding_after) const { - return get_output_size(dim_size, padding_before, padding_after, - _dilation_factor); -} - -MEMBERFN(size_t)::get_packed_params_size(void) const { - return _convs[0][0]->get_packed_params_size(); -} - -MEMBERFN(void)::set_packed_params_buffer(void *buffer) { - // Set the buffer for all convolution engines - for (auto &&row : _convs) { - for (auto &&conv : row) { - conv->set_packed_params_buffer(buffer); - } - } -} - -MEMBERFN(void) -::pack_params(const void *const weights, const void *const biases) const { - _convs[0][0]->pack_params(weights, biases); -} - -MEMBERFN(void) -::pack_params(void *const buffer, const void *const weights, - const void *const biases) const { - _convs[0][0]->pack_params(buffer, weights, biases); -} - -MEMBERFN(void) -::pack_params(void *const buffer, const void *const weights, - const unsigned int ldrow, const unsigned int ldcol, - const void *const biases) const { - _convs[0][0]->pack_params(buffer, weights, ldrow, ldcol, biases); -} - -MEMBERFN(size_t)::get_working_space_size(unsigned int nthreads) const { - return _convs[0][0]->get_working_space_size(nthreads); -} - -MEMBERFN(void)::set_working_space(void *const ws) { - // Use the same working space set for all contained depthwise engines. 
- for (auto &&row : _convs) { - for (auto &&conv : row) { - conv->set_working_space(ws); - } - } -} - -MEMBERFN(unsigned int)::get_window(void) const { - return _convs[0][0]->get_window(); -} - -MEMBERFN(void) -::run(const unsigned int start, const unsigned int stop, - const unsigned int threadid) { - // Run each contained convolution in turn - for (auto &&row : _convs) { - for (auto &&conv : row) { - conv->run(start, stop, threadid); - } - } -} - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp deleted file mode 100644 index a00a1ef04a..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/impl_fp16_fp16.hpp +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -/* - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - * - * NOTE: Header to be included by implementation files only. - * - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - */ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -#include "arm.hpp" -#include "impl_base.hpp" - -#pragma once - -using namespace neon_convolution_kernels; - -namespace depthwise -{ - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float16_t, float16_t, float16_t ->::DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, activation, - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float16_t, float16_t, float16_t ->::DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, - n_output_rows, n_output_cols, activation, - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - 
unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <ActivationFunction Activation> -void DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float16_t, float16_t, float16_t ->::execute_tile( - int n_channels, - const void *weights_biases_ptr, - const float16_t *input, - const unsigned int in_row_stride, - const unsigned int in_col_stride, - float16_t *output, - const unsigned int out_row_stride, - const unsigned int out_col_stride -) -{ - // Instantiate pointers - const float16_t* __restrict__ inptr_base = input; - float16_t* __restrict__ outptr_base = output; - const float16_t* __restrict__ params = static_cast<const float16_t*>(weights_biases_ptr); - - // Perform the depthwise convolution - int channels_remaining = n_channels; - for (; channels_remaining >= 8; channels_remaining -= 8) - { - // Load input tile - float16x8_t u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - const float16_t* const inptr_row = inptr_base + i*in_row_stride; - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = vld1q_f16(inptr_row + j*in_col_stride); - } - } - inptr_base += 8; - - // Load weights tile - float16x8_t vbias = vld1q_f16(params); - params += 8; - - float16x8_t w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = vld1q_f16(params); - params += 8; - } - } - - // Perform the convolution - float16x8_t v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - v[out_i][out_j] = vbias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int 
i = base_i + in_i; - for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const unsigned int j = base_j + in_j; - - // v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - v[out_i][out_j] = vaddq_f16(v[out_i][out_j], vmulq_f16(w[in_i][in_j], u[i][j])); - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vmaxq_f16(v[out_i][out_j], vdupq_n_f16(0.0f)); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vminq_f16(v[out_i][out_j], vdupq_n_f16(6.0f)); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - float16_t* const outptr_row = outptr_base + i*out_row_stride; - for (unsigned int j = 0; j < OutputTileCols; j++) - { - vst1q_f16(outptr_row + j*out_col_stride, v[i][j]); - } - } - outptr_base += 8; - } - for (; channels_remaining; channels_remaining--) - { - // Load input tile - float16_t u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - const float16_t* const inptr_row = inptr_base + i*in_row_stride; - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = *(inptr_row + j*in_col_stride); - } - } - inptr_base++; - - // Load weights tile - float16_t bias = *(params++); - float16_t w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = *(params++); - } - } - - // Perform the convolution - float16_t v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - // Clear the accumulator - v[out_i][out_j] = bias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; 
- for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const int j = base_j + in_j; - v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::max<float16_t>(0.0f, v[out_i][out_j]); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::min<float16_t>(6.0f, v[out_i][out_j]); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - float16_t* const outptr_row = outptr_base + i*out_row_stride; - for (unsigned int j = 0; j < OutputTileCols; j++) - { - *(outptr_row + j*out_col_stride) = v[i][j]; - } - } - outptr_base++; - } -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <ActivationFunction Activation> -void DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float16_t, float16_t, float16_t ->::execute_tile( - int n_channels, - const void *weights_biases_ptr, - const float16_t * inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float16_t *outptrs[Base::output_tile_rows][Base::output_tile_cols] -) -{ - // Instantiate pointers - const float16_t* __restrict__ params = static_cast<const float16_t*>(weights_biases_ptr); - int n = 0; - - // Perform the depthwise convolution - int channels_remaining = n_channels; - for (; channels_remaining >= 8; channels_remaining -= 8, n += 8) - { - // Load input tile - float16x8_t u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = vld1q_f16(inptrs[i][j] + n); - } - } - - // Load weights tile - float16x8_t vbias = vld1q_f16(params); - params += 8; - - float16x8_t w[KernelRows][KernelCols]; - for 
(unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = vld1q_f16(params); - params += 8; - } - } - - // Perform the convolution - float16x8_t v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - v[out_i][out_j] = vbias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; - for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const unsigned int j = base_j + in_j; - - // v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - v[out_i][out_j] = vaddq_f16(v[out_i][out_j], vmulq_f16(w[in_i][in_j], u[i][j])); - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vmaxq_f16(v[out_i][out_j], vdupq_n_f16(0.0f)); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vminq_f16(v[out_i][out_j], vdupq_n_f16(6.0f)); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - for (unsigned int j = 0; j < OutputTileCols; j++) - { - vst1q_f16(outptrs[i][j] + n, v[i][j]); - } - } - } - for (; channels_remaining; channels_remaining--, n++) - { - // Load input tile - float16_t u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = *(inptrs[i][j] + n); - } - } - - // Load weights tile - float16_t bias = *(params++); - float16_t w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = *(params++); - } - } - - // Perform the convolution - float16_t 
v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - // Clear the accumulator - v[out_i][out_j] = bias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; - for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const int j = base_j + in_j; - v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::max<float16_t>(0.0f, v[out_i][out_j]); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::min<float16_t>(6.0f, v[out_i][out_j]); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - for (unsigned int j = 0; j < OutputTileCols; j++) - { - *(outptrs[i][j] + n) = v[i][j]; - } - } - } -} - -} // namespace depthwise -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp deleted file mode 100644 index b0d8126a40..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/impl_fp32_fp32.hpp +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright (c) 2018-2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - * - * NOTE: Header to be included by implementation files only. - * - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- */ - -#include "arm.hpp" -#include "impl_base.hpp" - -#pragma once - -using namespace neon_convolution_kernels; - -namespace depthwise -{ - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float, float, float ->::DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, activation, - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float, float, float ->::DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - ActivationFunction activation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, - n_output_rows, n_output_cols, activation, - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <ActivationFunction Activation> -void DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float, float, float ->::execute_tile( - int 
n_channels, - const void *weights_biases_ptr, - const float *input, - const unsigned int in_row_stride, - const unsigned int in_col_stride, - float *output, - const unsigned int out_row_stride, - const unsigned int out_col_stride -) -{ - // Instantiate pointers - const float* __restrict__ inptr_base = input; - float* __restrict__ outptr_base = output; - const float* __restrict__ params = static_cast<const float*>(weights_biases_ptr); - - // Perform the depthwise convolution - int channels_remaining = n_channels; - for (; channels_remaining >= 4; channels_remaining -= 4) - { - // Load input tile - float32x4_t u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - const float* const inptr_row = inptr_base + i*in_row_stride; - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = vld1q_f32(inptr_row + j*in_col_stride); - } - } - inptr_base += 4; - - // Load weights tile - float32x4_t vbias = vld1q_f32(params); - params += 4; - - float32x4_t w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = vld1q_f32(params); - params += 4; - } - } - - // Perform the convolution - float32x4_t v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - v[out_i][out_j] = vbias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; - for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const unsigned int j = base_j + in_j; - - // v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - v[out_i][out_j] = vmlaq_f32(v[out_i][out_j], w[in_i][in_j], u[i][j]); - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - 
Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vmaxq_f32(v[out_i][out_j], vdupq_n_f32(0.0f)); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vminq_f32(v[out_i][out_j], vdupq_n_f32(6.0f)); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - float* const outptr_row = outptr_base + i*out_row_stride; - for (unsigned int j = 0; j < OutputTileCols; j++) - { - vst1q_f32(outptr_row + j*out_col_stride, v[i][j]); - } - } - outptr_base += 4; - } - for (; channels_remaining; channels_remaining--) - { - // Load input tile - float u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - const float* const inptr_row = inptr_base + i*in_row_stride; - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = *(inptr_row + j*in_col_stride); - } - } - inptr_base++; - - // Load weights tile - float bias = *(params++); - float w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = *(params++); - } - } - - // Perform the convolution - float v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - // Clear the accumulator - v[out_i][out_j] = bias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; - for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const int j = base_j + in_j; - v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::max(0.0f, v[out_i][out_j]); - } - if (Activation == 
ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::min(6.0f, v[out_i][out_j]); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - float* const outptr_row = outptr_base + i*out_row_stride; - for (unsigned int j = 0; j < OutputTileCols; j++) - { - *(outptr_row + j*out_col_stride) = v[i][j]; - } - } - outptr_base++; - } -} - - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <ActivationFunction Activation> -void DepthwiseConvolution< - OutputTileRows, OutputTileCols, - KernelRows, KernelCols, StrideRows, StrideCols, - float, float, float ->::execute_tile( - int n_channels, - const void *weights_biases_ptr, - const float *inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - float *outptrs[Base::output_tile_rows][Base::output_tile_cols] -) -{ - const float* __restrict__ params = static_cast<const float*>(weights_biases_ptr); - - // Perform the depthwise convolution - int channels_remaining = n_channels; - int n = 0; - for (; channels_remaining >= 4; channels_remaining -= 4, n += 4) - { - // Load input tile - float32x4_t u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = vld1q_f32(inptrs[i][j] + n); - } - } - - // Load weights tile - float32x4_t vbias = vld1q_f32(params); - params += 4; - - float32x4_t w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = vld1q_f32(params); - params += 4; - } - } - - // Perform the convolution - float32x4_t v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - v[out_i][out_j] = vbias; - - // Base co-ordinate - const int base_i 
= out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; - for (unsigned int in_j = 0; in_j < KernelCols; in_j++) - { - const unsigned int j = base_j + in_j; - - // v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - v[out_i][out_j] = vmlaq_f32(v[out_i][out_j], w[in_i][in_j], u[i][j]); - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vmaxq_f32(v[out_i][out_j], vdupq_n_f32(0.0f)); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = vminq_f32(v[out_i][out_j], vdupq_n_f32(6.0f)); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - for (unsigned int j = 0; j < OutputTileCols; j++) - { - vst1q_f32(outptrs[i][j] + n, v[i][j]); - } - } - } - for (; channels_remaining; channels_remaining--, n++) - { - // Load input tile - float u[Base::inner_tile_rows][Base::inner_tile_cols]; - for (int i = 0; i < Base::inner_tile_rows; i++) - { - for (int j = 0; j < Base::inner_tile_cols; j++) - { - u[i][j] = *(inptrs[i][j] + n); - } - } - - // Load weights tile - float bias = *(params++); - float w[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - w[i][j] = *(params++); - } - } - - // Perform the convolution - float v[OutputTileRows][OutputTileCols]; - for (unsigned int out_i = 0; out_i < OutputTileRows; out_i++) - { - for (unsigned int out_j = 0; out_j < OutputTileCols; out_j++) - { - // Clear the accumulator - v[out_i][out_j] = bias; - - // Base co-ordinate - const int base_i = out_i * StrideRows; - const int base_j = out_j * StrideCols; - - // Fill the accumulator - for (unsigned int in_i = 0; in_i < KernelRows; in_i++) - { - const unsigned int i = base_i + in_i; - for (unsigned int in_j = 0; in_j < 
KernelCols; in_j++) - { - const int j = base_j + in_j; - v[out_i][out_j] += w[in_i][in_j] * u[i][j]; - } - } - - // Apply the activation function - if (Activation == ActivationFunction::ReLU || - Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::max(0.0f, v[out_i][out_j]); - } - if (Activation == ActivationFunction::ReLU6) - { - v[out_i][out_j] = std::min(6.0f, v[out_i][out_j]); - } - } - } - - // Store the output tile - for (unsigned int i = 0; i < OutputTileRows; i++) - { - for (unsigned int j = 0; j < OutputTileCols; j++) - { - *(outptrs[i][j] + n) = v[i][j]; - } - } - } -} - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qa8.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qa8.hpp deleted file mode 100644 index e8b4c7bc0f..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qa8.hpp +++ /dev/null @@ -1,511 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - * - * NOTE: Header to be included by implementation files only. - * - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - */ - -#include <limits> - -#include "arm.hpp" -#include "impl_base.hpp" -#include "depthwise_quantized.hpp" - -namespace depthwise -{ -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - const ActivationFunction activation, - const QAsymm8Params& weight_quantisation, - const QAsymm8Params& input_quantisation, - const QAsymm8Params& output_quantisation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : QAsymm8DepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, - activation, weight_quantisation, input_quantisation, output_quantisation, - QAsymm8RescaleParams::make_rescale_params(weight_quantisation, input_quantisation, output_quantisation), - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::QAsymm8DepthwiseConvolution( - 
int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - const ActivationFunction activation, - const QAsymm8Params& weight_quantisation, - const QAsymm8Params& input_quantisation, - const QAsymm8Params& output_quantisation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : QAsymm8DepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, - n_output_rows, n_output_cols, - activation, weight_quantisation, input_quantisation, output_quantisation, - QAsymm8RescaleParams::make_rescale_params(weight_quantisation, input_quantisation, output_quantisation), - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - const ActivationFunction activation, - const QAsymm8Params& weight_quantisation, - const QAsymm8Params& input_quantisation, - const QAsymm8Params& output_quantisation, - const QAsymm8RescaleParams& rescale_params, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, activation, - padding_top, padding_left, padding_bottom, padding_right - ), - _weights_quant(weight_quantisation), - _inputs_quant(input_quantisation), - _output_quant(output_quantisation), - rescale_parameters(rescale_params) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -QAsymm8DepthwiseConvolution< - 
OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::QAsymm8DepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - int n_output_rows, int n_output_cols, - const ActivationFunction activation, - const QAsymm8Params& weight_quantisation, - const QAsymm8Params& input_quantisation, - const QAsymm8Params& output_quantisation, - const QAsymm8RescaleParams& rescale_params, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, - n_output_rows, n_output_cols, activation, - padding_top, padding_left, padding_bottom, padding_right - ), - _weights_quant(weight_quantisation), - _inputs_quant(input_quantisation), - _output_quant(output_quantisation), - rescale_parameters(rescale_params) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -uint8_t QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::_input_padding_value(void) const -{ - return _inputs_quant.offset; -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -void QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::_pack_params( - void * const buffer, - const void * const weights, - const unsigned int weight_row_stride, - const unsigned int weight_col_stride, - const void * const biases -) const -{ - const uint8_t *wptr = static_cast<const uint8_t *>(weights); - const int32_t *bptr = static_cast<const int32_t *>(biases); - uint8_t *outptr = static_cast<uint8_t *>(buffer); - - // We set the vector length to use doubles on both Aarch64 and Aarch32. 
NOTE - // For SVE set this to half the vector length. - unsigned int veclen = 8; - - // While there are channels left to process, pack a vector length of them at - // a time and reduce the size of vector used as the size of the tensor - // decreases. - for ( - unsigned int n_channels = this->n_channels(); n_channels; - n_channels -= veclen, - outptr += veclen*(sizeof(int32_t) + this->kernel_rows*this->kernel_cols) - ) - { - // NOTE Ignore this section if using SVE, the vector length remains the - // same and we just don't fill a full register for the tail. - while (n_channels < veclen) - { - // Reduce the vector length to either 8 or 1 (scalar) - // TODO Support more vector lengths in `execute_tile`. - veclen = (veclen == 16) ? 8 : 1; - } - - // Get pointers to bias and weight portions of the output structure. - int32_t *out_bptr = reinterpret_cast<int32_t *>(outptr); - uint8_t *out_wptr = outptr + veclen*sizeof(int32_t); - - // Copy a vector length of elements - for (unsigned int n = 0; n < veclen && n < n_channels; n++) - { - const int32_t bias = (bptr != nullptr) ? 
*(bptr++) : 0; - out_bptr[n] = bias; - - for (unsigned int i = 0; i < KernelRows; i++) - { - uint8_t *row_outptr = out_wptr + i*KernelCols*veclen; - for (unsigned int j = 0; j < KernelCols; j++) - { - uint8_t w = *(wptr + i*weight_row_stride + j*weight_col_stride); - row_outptr[j*veclen + n] = w; - } - } - wptr++; - } - } -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename FInput, typename FOutput -> -static inline void tilefn( - int n_channels, - const void* packed_params, - FInput &get_input_ptr, - FOutput &get_output_ptr, - const int32_t clamp_max, - const int32_t clamp_min, - const uint8_t input_offset, - const uint8_t weight_offset, - const uint8_t output_offset, - const int32_t requant_multiplier, - const int32_t requant_shift -) -{ - constexpr int InnerTileRows = StrideRows * (OutputTileRows - 1) + KernelRows; - constexpr int InnerTileCols = StrideCols * (OutputTileCols - 1) + KernelCols; - - // Offset into channels - int channel = 0; - - // Byte type pointer to weights and biases - const uint8_t *wbptr = static_cast<const uint8_t *>(packed_params); - - for (; n_channels >= 8; n_channels -= 8, channel += 8) - { - const int32x4_t biases[2] = { - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr)), - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 4), - }; - wbptr += 8*sizeof(int32_t); - - int16x8_t weights[KernelRows][KernelCols]; - const uint8x8_t woffset = vdup_n_u8(weight_offset); - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - const uint8x8_t w = vld1_u8(wbptr); - weights[i][j] = reinterpret_cast<int16x8_t>(vsubl_u8(w, woffset)); - wbptr += 8; - } - } - - int16x8_t inputs[InnerTileRows][InnerTileCols]; - const uint8x8_t ioffset = vdup_n_u8(input_offset); - for (unsigned int i = 0; i < InnerTileRows; i++) - { - for (unsigned int j = 0; j < InnerTileCols; 
j++) - { - const auto x = vld1_u8(get_input_ptr(i, j, channel)); - inputs[i][j] = reinterpret_cast<int16x8_t>(vsubl_u8(x, ioffset)); - } - } - - for (unsigned int oi = 0; oi < OutputTileRows; oi++) - { - for (unsigned int oj = 0; oj < OutputTileCols; oj++) - { - int32x4_t acc_a = biases[0], acc_b = biases[1]; - - for (unsigned int wi = 0; wi < KernelRows; wi++) - { - for (unsigned int wj = 0; wj < KernelCols; wj++) - { - const auto w = weights[wi][wj]; - const auto x = inputs[oi * StrideRows + wi][oj * StrideCols + wj]; -#ifndef __aarch64__ - acc_a = vmlal_s16(acc_a, vget_low_s16(w), vget_low_s16(x)); - acc_b = vmlal_s16(acc_b, vget_high_s16(w), vget_high_s16(x)); -#else - asm("smlal %[acc_a].4s, %[w].4h, %[x].4h\n" - "smlal2 %[acc_b].4s, %[w].8h, %[x].8h\n" - : [acc_a] "+w"(acc_a), [acc_b] "+w"(acc_b) - : [w] "w"(w), [x] "w"(x)); -#endif // __aarch64__ - } - } - - int32x4_t final_accs[2]; - for (unsigned int i = 0; i < 2; i++) - { - const int32x4_t y = rounding_divide_by_exp2( - saturating_doubling_high_mul((i == 0 ? 
acc_a : acc_b), requant_multiplier), - requant_shift); - const int32x4_t offset = reinterpret_cast<int32x4_t>(vdupq_n_u32(output_offset)); - final_accs[i] = vaddq_s32(y, offset); - final_accs[i] = vmaxq_s32(final_accs[i], vdupq_n_s32(clamp_min)); - final_accs[i] = vminq_s32(final_accs[i], vdupq_n_s32(clamp_max)); - } - -#ifndef __aarch64__ - const int16x8x2_t zelems = vuzpq_s16(vreinterpretq_s16_s32(final_accs[0]), - vreinterpretq_s16_s32(final_accs[1])); - const int8x16_t elems = vreinterpretq_s8_s16(zelems.val[0]); - - const int8x16x2_t zoutput = vuzpq_s8(elems, elems); - const uint8x8_t output = - vget_low_u8(vreinterpretq_u8_s8(zoutput.val[0])); - vst1_u8(get_output_ptr(oi, oj, channel), output); -#else - const int8x16_t elems = vreinterpretq_s8_s16( - vuzp1q_s16(vreinterpretq_s16_s32(final_accs[0]), - vreinterpretq_s16_s32(final_accs[1]))); - const uint8x8_t output = - vget_low_u8(vreinterpretq_u8_s8(vuzp1q_s8(elems, elems))); - vst1_u8(get_output_ptr(oi, oj, channel), output); -#endif // __aarch64__ - } - } - } - for (; n_channels; n_channels--, channel++) - { - // Load bias - const int32_t bias = *reinterpret_cast<const int32_t *>(wbptr); - wbptr += sizeof(int32_t); - - // Load weights - int16_t weights[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - weights[i][j] = *(wbptr++) - weight_offset; - } - } - - // Load the input activations - int16_t inputs[InnerTileRows][InnerTileCols]; - for (unsigned int i = 0; i < InnerTileRows; i++) - { - for (unsigned int j = 0; j < InnerTileCols; j++) - { - inputs[i][j] = *(get_input_ptr(i, j, channel)) - input_offset; - } - } - - // Perform the convolution - for (unsigned int oi = 0; oi < OutputTileRows; oi++) - { - for (unsigned int oj = 0; oj < OutputTileCols; oj++) - { - int32_t acc = bias; - - for (unsigned int wi = 0; wi < KernelRows; wi++) - { - for (unsigned int wj = 0; wj < KernelCols; wj++) - { - const auto w = weights[wi][wj], x = 
inputs[oi*StrideRows + wi][oj*StrideCols + wj]; - acc += w * x; - } - } - - // Requantize - acc = rounding_divide_by_exp2( - saturating_doubling_high_mul(acc, requant_multiplier), - requant_shift); - acc += output_offset; - acc = std::max(acc, clamp_min); - acc = std::min(acc, clamp_max); - uint8_t output = static_cast<uint8_t>(acc); - *(get_output_ptr(oi, oj, channel)) = output; - } - } - } -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename FInput, typename FOutput -> -static inline void execute_tilefn( - int n_channels, - const void* packed_params, - const nck::ActivationFunction actfn, - FInput &get_input_ptr, - FOutput &get_output_ptr, - const QAsymm8Params &input_quant, - const QAsymm8Params &weight_quant, - const QAsymm8Params &output_quant, - const QAsymm8RescaleParams &requant -) { - // Compute min/max clamp values - int32_t clamp_min = std::numeric_limits<uint8_t>::min(); - int32_t clamp_max = std::numeric_limits<uint8_t>::max(); - - if (actfn == nck::ActivationFunction::ReLU || - actfn == nck::ActivationFunction::ReLU6) { - const int32_t bottom_rail = output_quant.offset; - clamp_min = std::max(clamp_min, bottom_rail); - } - - if (actfn == nck::ActivationFunction::ReLU6) { - const int32_t top_rail = output_quant.quantize(6.0f); - clamp_max = std::min(clamp_max, top_rail); - } - - // Call the tile execution method - tilefn<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, - StrideCols>(n_channels, packed_params, get_input_ptr, get_output_ptr, - clamp_max, clamp_min, input_quant.offset, - weight_quant.offset, output_quant.offset, - requant.multiplier, requant.shift); -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <nck::ActivationFunction Activation> -void 
QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::execute_tile( - int n_channels, - const void* packed_params, - const uint8_t* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - uint8_t* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride -) { - // Construct methods to get pointers - const auto get_input_ptr = [inptr, in_row_stride, in_col_stride]( - const int i, const int j, const int channel) { - return inptr + i * in_row_stride + j * in_col_stride + channel; - }; - - const auto get_output_ptr = [outptr, out_row_stride, out_col_stride]( - const int i, const int j, const int channel) { - return outptr + i * out_row_stride + j * out_col_stride + channel; - }; - - execute_tilefn<OutputTileRows, OutputTileCols, KernelRows, KernelCols, - StrideRows, StrideCols>( - n_channels, packed_params, Activation, get_input_ptr, get_output_ptr, - _inputs_quant, _weights_quant, _output_quant, rescale_parameters); -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <nck::ActivationFunction Activation> -void QAsymm8DepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::execute_tile( - int n_channels, - const void* packed_params, - const uint8_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - uint8_t* outptrs[Base::output_tile_rows][Base::output_tile_cols] -) { - // Construct methods to get pointers - const auto get_input_ptr = [inptrs](const int i, const int j, - const int channel) { - return inptrs[i][j] + channel; - }; - - const auto get_output_ptr = [outptrs](const int i, const int j, - const int channel) { - return outptrs[i][j] + channel; - }; - - // Call the tile execution method - execute_tilefn<OutputTileRows, OutputTileCols, KernelRows, KernelCols, - StrideRows, StrideCols>( - 
n_channels, packed_params, Activation, get_input_ptr, get_output_ptr, - _inputs_quant, _weights_quant, _output_quant, rescale_parameters); -} - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qs8_per_channel.hpp b/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qs8_per_channel.hpp deleted file mode 100644 index 68e20d98a9..0000000000 --- a/src/core/NEON/kernels/convolution/depthwise/impl_qa8_qs8_per_channel.hpp +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - * - * NOTE: Header to be included by implementation files only. - * - * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- */ - -#include <limits> - -#include "arm.hpp" -#include "impl_base.hpp" -#include "depthwise_quantized.hpp" - -#pragma once - -namespace { - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename FInput, typename FOutput -> -static inline void tilefn_hybrid( - int n_channels, - const void* packed_params, - FInput &get_input_ptr, - FOutput &get_output_ptr, - int32_t clamp_min, - int32_t clamp_max, - uint8_t input_offset, - uint8_t output_offset -) -{ - constexpr int InnerTileRows = StrideRows * (OutputTileRows - 1) + KernelRows; - constexpr int InnerTileCols = StrideCols * (OutputTileCols - 1) + KernelCols; - - // Offset into channels - int channel = 0; - - // Byte type pointer to weights and biases - const int8_t *wbptr = static_cast<const int8_t *>(packed_params); - - for (; n_channels >= 8; n_channels -= 8, channel += 8) - { - const int32x4_t biases[2] = { - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr)), - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 4), - }; - const int32x4_t multipliers[2] = { - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 8), - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 12), - }; - const int32x4_t shifts[2] = { - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 16), - vld1q_s32(reinterpret_cast<const int32_t *>(wbptr) + 20), - }; - wbptr += 24*sizeof(int32_t); - - int16x8_t weights[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - const auto w = vld1_s8(wbptr); - weights[i][j] = reinterpret_cast<int16x8_t>(vmovl_s8(w)); - wbptr += 8; - } - } - - int16x8_t inputs[InnerTileRows][InnerTileCols]; - const uint8x8_t ioffset = vdup_n_u8(input_offset); - for (unsigned int i = 0; i < InnerTileRows; i++) - { - for (unsigned int j = 0; j < InnerTileCols; j++) - { - const auto x = vld1_u8(get_input_ptr(i, j, 
channel)); - inputs[i][j] = reinterpret_cast<int16x8_t>(vsubl_u8(x, ioffset)); - } - } - - for (unsigned int oi = 0; oi < OutputTileRows; oi++) - { - for (unsigned int oj = 0; oj < OutputTileCols; oj++) - { - int32x4_t accs[2]; - for (unsigned int i = 0; i < 2; i++) - { - accs[i] = biases[i]; - } - - for (unsigned int wi = 0; wi < KernelRows; wi++) - { - for (unsigned int wj = 0; wj < KernelCols; wj++) - { - const auto w = weights[wi][wj]; - const auto x = inputs[oi * StrideRows + wi][oj * StrideCols + wj]; - accs[0] = vmlal_s16(accs[0], vget_low_s16(w), vget_low_s16(x)); - accs[1] = vmlal_s16(accs[1], vget_high_s16(w), vget_high_s16(x)); - } - } - - int32x4_t final_accs[2]; - for (unsigned int i = 0; i < 2; i++) - { - const int32x4_t y = rounding_divide_by_exp2( - saturating_doubling_high_mul(accs[i], multipliers[i]), - shifts[i]); - const int32x4_t offset = reinterpret_cast<int32x4_t>(vdupq_n_u32(output_offset)); - final_accs[i] = vaddq_s32(y, offset); - final_accs[i] = vmaxq_s32(final_accs[i], vdupq_n_s32(clamp_min)); - final_accs[i] = vminq_s32(final_accs[i], vdupq_n_s32(clamp_max)); - } - - const auto elems_s16 = vuzpq_s16(vreinterpretq_s16_s32(final_accs[0]), - vreinterpretq_s16_s32(final_accs[1])); - const int8x16_t elems = vreinterpretq_s8_s16(elems_s16.val[0]); - const uint8x8_t output = - vget_low_u8(vreinterpretq_u8_s8(vuzpq_s8(elems, elems).val[0])); - - vst1_u8(get_output_ptr(oi, oj, channel), output); - } - } - } - - for (; n_channels; n_channels--, channel++) - { - // Load bias - const int32_t bias = *reinterpret_cast<const int32_t *>(wbptr); - const int32_t multiplier = *reinterpret_cast<const int32_t *>(wbptr + sizeof(int32_t)); - const int32_t shift = *reinterpret_cast<const int32_t *>(wbptr + 2*sizeof(int32_t)); - - wbptr += 3*sizeof(int32_t); - - // Load weights - int16_t weights[KernelRows][KernelCols]; - for (unsigned int i = 0; i < KernelRows; i++) - { - for (unsigned int j = 0; j < KernelCols; j++) - { - weights[i][j] = *(wbptr++); - } - } - 
- // Load the input activations - int16_t inputs[InnerTileRows][InnerTileCols]; - for (unsigned int i = 0; i < InnerTileRows; i++) - { - for (unsigned int j = 0; j < InnerTileCols; j++) - { - inputs[i][j] = *(get_input_ptr(i, j, channel)) - input_offset; - } - } - - // Perform the convolution - for (unsigned int oi = 0; oi < OutputTileRows; oi++) - { - for (unsigned int oj = 0; oj < OutputTileCols; oj++) - { - int32_t acc = bias; - - for (unsigned int wi = 0; wi < KernelRows; wi++) - { - for (unsigned int wj = 0; wj < KernelCols; wj++) - { - const auto w = weights[wi][wj], x = inputs[oi*StrideRows + wi][oj*StrideCols + wj]; - acc += w * x; - } - } - - // Requantize - acc = rounding_divide_by_exp2( - saturating_doubling_high_mul(acc, multiplier), - -shift); - acc += output_offset; - acc = std::max(acc, clamp_min); - acc = std::min(acc, clamp_max); - uint8_t output = static_cast<uint8_t>(acc); - *(get_output_ptr(oi, oj, channel)) = output; - } - } - } -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols, - typename FInput, typename FOutput -> -static inline void execute_tilefn_hybrid( - int n_channels, - const void* packed_params, - const ActivationFunction actfn, - const qasymm8::QAsymm8Params &input_quant, - const qasymm8::QAsymm8Params &output_quant, - FInput &get_input_ptr, - FOutput &get_output_ptr) { - - // Compute min/max clamp values - int32_t clamp_min = std::numeric_limits<uint8_t>::min(); - int32_t clamp_max = std::numeric_limits<uint8_t>::max(); - - if (actfn == ActivationFunction::ReLU) { - clamp_min = output_quant.offset; - } - - // Disabling Relu6 for now - if (actfn == ActivationFunction::ReLU6) { - const int32_t top_rail = output_quant.quantize(6.0f); - clamp_max = std::min(clamp_max, top_rail); - } - - // Call the tile execution method - tilefn_hybrid<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, - 
StrideCols>(n_channels, packed_params, get_input_ptr, get_output_ptr, clamp_min, clamp_max, input_quant.offset, output_quant.offset); -} -} - - - -namespace depthwise { -using namespace qsymm8; -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -QSymm8HybridPerChannelDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::QSymm8HybridPerChannelDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - const ActivationFunction activation, - const QSymm8PerChannelParams& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - unsigned int padding_top, - unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : QSymm8HybridPerChannelDepthwiseConvolution( - n_batches, n_input_rows, n_input_cols, n_channels, - activation, weight_quantisation, input_quantisation, output_quantisation, - QSymm8PerChannelRescaleParams::make_rescale_params(weight_quantisation, input_quantisation, output_quantisation), - padding_top, padding_left, padding_bottom, padding_right - ) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -QSymm8HybridPerChannelDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::QSymm8HybridPerChannelDepthwiseConvolution( - int n_batches, int n_input_rows, int n_input_cols, int n_channels, - const ActivationFunction activation, - const QSymm8PerChannelParams& weight_quantisation, - const qasymm8::QAsymm8Params& input_quantisation, - const qasymm8::QAsymm8Params& output_quantisation, - const QSymm8PerChannelRescaleParams& rescale_params, - unsigned int padding_top, - 
unsigned int padding_left, - unsigned int padding_bottom, - unsigned int padding_right -) : Base( - n_batches, n_input_rows, n_input_cols, n_channels, activation, - padding_top, padding_left, padding_bottom, padding_right - ), - _weights_quant(weight_quantisation), - _input_quant(input_quantisation), - _output_quant(output_quantisation), - _rescale_parameters(rescale_params) -{ -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -uint8_t QSymm8HybridPerChannelDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::_input_padding_value(void) const -{ - return _input_quant.offset; -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -void QSymm8HybridPerChannelDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::_pack_params( - void * const buffer, - const void * const weights, - const unsigned int weight_row_stride, - const unsigned int weight_col_stride, - const void * const biases -) const -{ - const int8_t *wptr = static_cast<const int8_t *>(weights); - const int32_t *bptr = static_cast<const int32_t *>(biases); - const int32_t *mptr = static_cast<const int32_t *>(_rescale_parameters.multipliers.data()); - const int32_t *sptr = static_cast<const int32_t *>(_rescale_parameters.shifts.data()); - int8_t *outptr = static_cast<int8_t *>(buffer); - - // We set the vector length to use doubles on both Aarch64 and Aarch32. NOTE - // For SVE set this to half the vector length. - unsigned int veclen = 8; - - // While there are channels left to process, pack a vector length of them at - // a time and reduce the size of vector used as the size of the tensor - // decreases. 
- for ( - unsigned int n_channels = this->n_channels(); n_channels; - n_channels -= veclen, - outptr += veclen*(3*sizeof(int32_t) + this->kernel_rows*this->kernel_cols) - ) - { - // NOTE Ignore this section if using SVE, the vector length remains the - // same and we just don't fill a full register for the tail. - while (n_channels < veclen) - { - // Reduce the vector length to either 8 or 1 (scalar) - // TODO Support more vector lengths in `execute_tile`. - veclen = (veclen == 16) ? 8 : 1; - } - - // Get pointers to bias and weight portions of the output structure. - int32_t *out_bptr = reinterpret_cast<int32_t *>(outptr); - int32_t *out_mptr = reinterpret_cast<int32_t *>(outptr + veclen*sizeof(int32_t)); - int32_t *out_sptr = reinterpret_cast<int32_t *>(outptr + 2*veclen*sizeof(int32_t)); - int8_t *out_wptr = outptr + 3*veclen*sizeof(int32_t); - - // Copy a vector length of elements - for (unsigned int n = 0; n < veclen && n < n_channels; n++) - { - const int32_t bias = (bptr != nullptr) ? *(bptr++) : 0; - const int32_t multiplier = (mptr != nullptr) ? *(mptr++) : 0; - const int32_t shift = (sptr != nullptr) ? 
*(sptr++) : 0; - - out_bptr[n] = bias; - out_mptr[n] = multiplier; - out_sptr[n] = -shift; - - for (unsigned int i = 0; i < KernelRows; i++) - { - int8_t *row_outptr = out_wptr + i*KernelCols*veclen; - for (unsigned int j = 0; j < KernelCols; j++) - { - int8_t w = *(wptr + i*weight_row_stride + j*weight_col_stride); - row_outptr[j*veclen + n] = w; - } - } - wptr++; - } - } -} - - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <ActivationFunction Activation> -void QSymm8HybridPerChannelDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::execute_tile( - int n_channels, - const void* packed_params, - const uint8_t* inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - uint8_t* outptr, - unsigned int out_row_stride, - unsigned int out_col_stride -) { - - // Construct methods to get pointers - const auto get_input_ptr = [inptr, in_row_stride, in_col_stride]( - const int i, const int j, const int channel) { - return inptr + i * in_row_stride + j * in_col_stride + channel; - }; - - const auto get_output_ptr = [outptr, out_row_stride, out_col_stride]( - const int i, const int j, const int channel) { - return outptr + i * out_row_stride + j * out_col_stride + channel; - }; - - execute_tilefn_hybrid<OutputTileRows, OutputTileCols, KernelRows, KernelCols, - StrideRows, StrideCols>( - n_channels, packed_params, Activation, _input_quant, _output_quant, get_input_ptr, get_output_ptr); -} - -template < - unsigned int OutputTileRows, unsigned int OutputTileCols, - unsigned int KernelRows, unsigned int KernelCols, - unsigned int StrideRows, unsigned int StrideCols -> -template <ActivationFunction Activation> -void QSymm8HybridPerChannelDepthwiseConvolution< - OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols ->::execute_tile( - int n_channels, - 
const void* packed_params, - const uint8_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols], - uint8_t* outptrs[Base::output_tile_rows][Base::output_tile_cols] -) { - // Construct methods to get pointers - const auto get_input_ptr = [inptrs](const int i, const int j, - const int channel) { - return inptrs[i][j] + channel; - }; - - const auto get_output_ptr = [outptrs](const int i, const int j, - const int channel) { - return outptrs[i][j] + channel; - }; - - // Call the tile execution method - execute_tilefn_hybrid<OutputTileRows, OutputTileCols, KernelRows, KernelCols, - StrideRows, StrideCols>( - n_channels, packed_params, Activation, _input_quant, _output_quant, get_input_ptr, get_output_ptr); -} - -} // namespace depthwise diff --git a/src/core/NEON/kernels/convolution/winograd/input_transform.hpp b/src/core/NEON/kernels/convolution/winograd/input_transform.hpp new file mode 100644 index 0000000000..265551288d --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/input_transform.hpp @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once + +#include "winograd.hpp" + +#include "src/core/NEON/kernels/arm_conv/addressing.hpp" +#include <algorithm> +#include <cstring> +#include <functional> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +namespace { + +template <typename T> +constexpr T iceildiv(const T a, const T b) +{ + return (a + b - 1) / b; +} + +} + +/* Driver class for the Winograd input transforms. + * + * This provides a base implementation which handles iteration over the input + * tensor; subclasses are responsible for managing working space and executing + * the transform on individual tiles. + */ +template <typename TIn, typename TOut=TIn> +class TransformBase : public ITransform +{ + const std::string m_name; + const unsigned int m_input_rows, m_input_cols; + + protected: + virtual size_t get_working_space_per_thread(const ConvolutionArgs &) const + { + return 0; + } + + virtual void initialise_thread_working_space(const ConvolutionArgs &, void *) const + { + // Nothing to do + } + + virtual void execute_tile( + unsigned int n_channels, + const TIn *inptr, size_t ld_in_row, size_t ld_in_col, + TOut *outptr, size_t ld_out_matrix, + unsigned int pad_top, unsigned int valid_rows, + unsigned int pad_left, unsigned int valid_cols, + void *working_space + ) const = 0; + + void execute_internal( + const ConvolutionArgs &args, + const TIn *inptr, size_t ld_in_batch, size_t ld_in_row, size_t ld_in_col, + TOut *outptr, size_t ld_out_batch, size_t ld_out_matrix, size_t ld_out_row, + void *working_space, unsigned int thread_id, unsigned int n_threads + ) const + { + // Get the working space for this thread, and initialise it. 
+ working_space = reinterpret_cast<char *>(working_space) + + this->get_working_space_per_thread(args) * thread_id; + this->initialise_thread_working_space(args, working_space); + + // Get tile traversal parameters + const auto tile_stride_rows = std::max(1u, m_input_rows - args.kernel_shape.rows + 1); + const auto tile_stride_cols = std::max(1u, m_input_cols - args.kernel_shape.cols + 1); + const auto n_tile_rows = iceildiv( + args.output_shape.rows, m_input_rows - args.kernel_shape.rows + 1); + const auto n_tile_cols = iceildiv( + args.output_shape.cols, m_input_cols - args.kernel_shape.cols + 1); + + // Execute over all batches + for (unsigned int batch = 0; batch < args.n_batches; batch++) + { + auto outptr_tile = outptr + thread_id * n_tile_cols * ld_out_row; + + // For a single batch, stripe the rows over the threads. + for (auto tile_i = thread_id; tile_i < n_tile_rows; tile_i += n_threads) + { + // Compute pointers and padding for this row of tiles + const auto start_i = tile_i * tile_stride_rows; + const auto pad_top = start_i < args.pad_top ? args.pad_top - start_i : 0; + const auto inptr_row = inptr + (pad_top ? 0 : start_i - args.pad_top) * ld_in_row; + const auto valid_rows = args.input_shape.rows - (pad_top ? 0 : start_i - args.pad_top); + + // Iterate over columns + for (auto tile_j = 0u; tile_j < n_tile_cols; tile_j++) + { + // Compute pointers and padding for this tile, then delegate to + // execute the kernel. + const auto start_j = tile_j * tile_stride_cols; + const auto pad_left = start_j < args.pad_left ? args.pad_left - start_j : 0; + const auto inptr_tile = inptr_row + (pad_left ? 0 : start_j - args.pad_left) * ld_in_col; + const auto valid_cols = args.input_shape.cols - (pad_left ? 
0 : start_j - args.pad_left); + + this->execute_tile( + args.n_input_channels, + inptr_tile, ld_in_row, ld_in_col, + outptr_tile, ld_out_matrix, + pad_top, valid_rows, pad_left, valid_cols, + working_space + ); + outptr_tile += ld_out_row; + } + + outptr_tile += (n_threads - 1) * n_tile_cols * ld_out_row; + } + + inptr += ld_in_batch; + outptr += ld_out_batch; + } + } + + public: + TransformBase(const std::string &name, unsigned int input_rows, unsigned int input_cols) + : m_name(name), m_input_rows(input_rows), m_input_cols(input_cols) + { + } + + const std::string &get_name(void) const override { return m_name; } + + unsigned int get_input_rows(void) const override final { return m_input_rows; } + unsigned int get_input_cols(void) const override final { return m_input_cols; } + + size_t get_working_space_size(const ConvolutionArgs &args, unsigned int n_threads) const override + { + return n_threads * this->get_working_space_per_thread(args); + } + + void execute( + const ConvolutionArgs &args, + const void *inptr, size_t ld_in_batch, size_t ld_in_row, size_t ld_in_col, + void *outptr, size_t ld_out_batch, size_t ld_out_matrix, size_t ld_out_row, + void *working_space, unsigned int thread_id, unsigned int n_threads + ) const override + { + execute_internal( + args, + reinterpret_cast<const TIn *>(inptr), ld_in_batch, ld_in_row, ld_in_col, + reinterpret_cast<TOut *>(outptr), ld_out_batch, ld_out_matrix, ld_out_row, + working_space, thread_id, n_threads + ); + } +}; + +template <typename TIn, typename TOut=TIn> +class TransformDirect : public TransformBase<TIn, TOut> +{ + using Kernel = std::function<void( + unsigned int, // Number of channels + const TIn *, size_t, size_t, // Pointer to first valid input element, row and column stride + unsigned int, unsigned int, unsigned int, unsigned int, // Top, left, bottom and right padding + TOut *, size_t // Base output pointer, stride between matrices + )>; + const Kernel m_kernel; + + protected: + void execute_tile( + 
unsigned int n_channels, + const TIn *inptr, size_t ld_in_row, size_t ld_in_col, + TOut *outptr, size_t ld_out_matrix, + unsigned int pad_top, unsigned int valid_rows, + unsigned int pad_left, unsigned int valid_cols, + void * + ) const override + { + const auto end_i = this->get_input_rows() - pad_top; + const auto pad_bottom = end_i < valid_rows ? 0 : end_i - valid_rows; + const auto end_j = this->get_input_cols() - pad_left; + const auto pad_right = end_j < valid_cols ? 0 : end_j - valid_cols; + + // Execute the kernel + m_kernel( + n_channels, inptr, ld_in_row, ld_in_col, + pad_top, pad_left, pad_bottom, pad_right, + outptr, ld_out_matrix + ); + } + + public: + TransformDirect(const std::string &name, unsigned int input_rows, unsigned int input_cols, Kernel kernel) + : TransformBase<TIn, TOut>(name, input_rows, input_cols), m_kernel(kernel) + { + } +}; + +template <typename TIn, typename TOut=TIn> +class TransformIndirect : public TransformBase<TIn, TOut> +{ + using Kernel = std::function<void( + unsigned int, // Number of channels + const TIn *const *, // Input pointers (one per point) + TOut *, size_t // Base output pointer, stride between matrices + )>; + const Kernel m_kernel; + + struct Workspace + { + const TIn **inptrs; + const TIn *input_buffer; + }; + + size_t sizeof_inptr_array(void) const + { + return sizeof(const TIn **) * this->get_input_rows() * this->get_input_cols(); + } + + protected: + size_t get_working_space_per_thread(const ConvolutionArgs &args) const override + { + return sizeof(Workspace) + sizeof_inptr_array() + sizeof(TIn) * args.n_input_channels; + } + + void initialise_thread_working_space(const ConvolutionArgs &args, void *buffer) const override + { + Workspace *ws = reinterpret_cast<Workspace *>(buffer); + buffer = ws + 1; + + ws->inptrs = reinterpret_cast<const TIn **>(buffer); + buffer = reinterpret_cast<char *>(buffer) + sizeof_inptr_array(); + + ws->input_buffer = reinterpret_cast<const TIn *>(buffer); + memset(buffer, 0, 
sizeof(TIn) * args.n_input_channels); + } + + void execute_tile( + unsigned int n_channels, + const TIn *inptr, size_t ld_in_row, size_t ld_in_col, + TOut *outptr, size_t ld_out_matrix, + unsigned int pad_top, unsigned int valid_rows, + unsigned int pad_left, unsigned int valid_cols, + void *working_space + ) const override + { + // Get the working space + auto ws = reinterpret_cast<Workspace *>(working_space); + + // Construct the input pointer array based on the given arguments + fill_pointer_array<const TIn>( + ws->inptrs, this->get_input_rows(), this->get_input_cols(), + inptr, ld_in_row, ld_in_col, + ws->input_buffer, + pad_top, valid_rows, + pad_left, valid_cols + ); + + // Execute the kernel + m_kernel(n_channels, ws->inptrs, outptr, ld_out_matrix); + } + + public: + TransformIndirect(const std::string &name, unsigned int input_rows, unsigned int input_cols, Kernel kernel) + : TransformBase<TIn, TOut>(name, input_rows, input_cols), m_kernel(kernel) + { + } +}; + +template <typename TIn, typename TOut=TIn> +class TransformUnpadded : public TransformBase<TIn, TOut> +{ + using Kernel = std::function<void( + unsigned int, // Number of channels + const TIn *, size_t, size_t, // Pointer to first input element, row and column stride + TOut *, size_t // Base output pointer, stride between matrices + )>; + const Kernel m_kernel; + + protected: + size_t get_working_space_per_thread(const ConvolutionArgs &args) const override + { + const auto input_points = this->get_input_rows() * this->get_input_cols(); + return sizeof(TIn) * input_points * args.n_input_channels; + } + + void execute_tile( + unsigned int n_channels, + const TIn *inptr, size_t ld_in_row, size_t ld_in_col, + TOut *const outptr, const size_t ld_out_matrix, + const unsigned int pad_top, const unsigned int valid_rows, + const unsigned int pad_left, const unsigned int valid_cols, + void *const working_space + ) const override + { + // If there's any padding, then copy the valid portion of the tensor into + 
// the working space and reset the pointer, row and column strides to point + // at this copy of the data. + if (pad_top || valid_rows < this->get_input_rows() || + pad_left || valid_cols < this->get_input_cols()) + { + const auto patch_ld_col = n_channels; + const auto patch_ld_row = patch_ld_col * this->get_input_cols(); + auto patch = reinterpret_cast<TIn *>(working_space) + + pad_top*patch_ld_row + pad_left*patch_ld_col; + + // Fill the input patch with padding + memset(working_space, 0, sizeof(TIn) * this->get_input_rows() * patch_ld_row); + + // Determine the bounds for which to copy + const auto last_i = std::min(valid_rows + pad_top, this->get_input_rows()); + const auto last_j = std::min(valid_cols + pad_left, this->get_input_cols()); + + // Copy across the valid portion of the patch + for (auto i = pad_top; i < last_i; i++) + { + auto inptr_col = inptr; + inptr += ld_in_row; + + auto patch_col = patch; + patch += patch_ld_row; + + for (auto j = pad_left; j < last_j; j++) + { + // Perform the copy and progress both input and patch pointers + memcpy(patch_col, inptr_col, n_channels * sizeof(TIn)); + inptr_col += ld_in_col; + patch_col += patch_ld_col; + } + } + + // Override the input pointer and strides + inptr = reinterpret_cast<const TIn *>(working_space); + ld_in_col = patch_ld_col; + ld_in_row = patch_ld_row; + } + + // Call the kernel + m_kernel(n_channels, inptr, ld_in_row, ld_in_col, outptr, ld_out_matrix); + } + + public: + TransformUnpadded(const std::string &name, unsigned int input_rows, unsigned int input_cols, Kernel kernel) + : TransformBase<TIn, TOut>(name, input_rows, input_cols), m_kernel(kernel) + { + } + + /* Utility method which can be used to get a transposed version of a kernel, + * this just calls the kernel with the input row and column strides reversed. 
+ */ + static constexpr Kernel get_transposed_kernel(const Kernel &kernel) + { + return [kernel] ( + const unsigned int n_channels, + const TIn *const inptr, const size_t ld_in_row, const size_t ld_in_col, + TOut *const outptr, const size_t ld_out_matrix + ) { + kernel(n_channels, inptr, ld_in_col, ld_in_row, outptr, ld_out_matrix); + }; + } +}; + +} // namespace input_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp16_6x6.cpp index d0ce307988..ad759b225e 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp16_fp16_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp16_6x6.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,20 +21,22 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -#include "arm.hpp" -#include "input.hpp" +#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) -namespace winograd -{ -template <> -void InputTransform<6, 6, __fp16, __fp16, WinogradRoots::Integers>::transform_tile( - const int n_channels, +#include <arm_neon.h> +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +void a64_fp16_6x6( + const unsigned int n_channels, const __fp16* const input_base, - const int input_row_stride, - const int input_col_stride, + const size_t input_row_stride, + const size_t input_col_stride, __fp16* outptr, - const int matrix_stride + const size_t matrix_stride ) { constexpr int inner_tile_rows = 6; @@ -271,7 +273,8 @@ void InputTransform<6, 6, __fp16, __fp16, WinogradRoots::Integers>::transform_ti } } -template class InputTransform<6, 6, __fp16, __fp16, WinogradRoots::Integers>; - +} // namespace input_transform } // namespace winograd -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
\ No newline at end of file +} // namespace arm_conv + +#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp index 0095e6c96b..a2c04e0d8d 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_6x6_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,33 +22,32 @@ * SOFTWARE. */ -#include "arm.hpp" -#include "input.hpp" +#ifdef __aarch64__ -namespace winograd -{ +#include <cstddef> -#ifdef __aarch64__ +namespace arm_conv { +namespace winograd { +namespace input_transform { -template <> -void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile( - int n_channels, - const float* input_base, - const int input_row_stride, - const int input_col_stride, - float* matrix_base, - const int matrix_stride +void a64_fp32_6x6( + unsigned int n_channels, + const float *input_base, + const size_t input_row_stride, + const size_t input_col_stride, + float *matrix_base, + const size_t matrix_stride ) { const float pcoeffs[4] = {1.0f, 2.0f, 4.0f, 5.0f}; __asm__ __volatile__( "ldr q0, [%[pcoeffs]]\n" "add x25, %[inptr0], %[input_row_stride]\n" - "add x9, %[input_col_stride1], %[input_col_stride1]\n" + "add x10, %[input_col_stride1], %[input_col_stride1]\n" "add x16, x25, %[input_row_stride]\n" - "add x19, x9, %[input_col_stride1]\n" + "add x8, x10, %[input_col_stride1]\n" "add x26, x16, %[input_row_stride]\n" - "add x20, x19, %[input_col_stride1]\n" + "add x20, x8, %[input_col_stride1]\n" "add x17, x26, %[input_row_stride]\n" "add x21, x20, %[input_col_stride1]\n" "add x27, x17, %[input_row_stride]\n" @@ -65,37 +64,37 @@ void 
InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "blt 2f\n" "1:\n" "ldr q8, [%[inptr0], x20]\n" - "ldr q2, [%[inptr0], x9]\n" + "ldr q2, [%[inptr0], x10]\n" "mov v14.16b, v8.16b\n" "ldr q9, [%[inptr0]]\n" "mov v10.16b, v8.16b\n" "ldr q1, [%[inptr0], x21]\n" "fmla v14.4s, v9.4s, v0.s[2]\n" - "ldr q4, [%[inptr0], x19]\n" + "ldr q4, [%[inptr0], x8]\n" "mov v9.16b, v8.16b\n" "ldr q12, [%[inptr0], %[input_col_stride1]]\n" "fmls v10.4s, v12.4s, v0.s[2]\n" "ldr q5, [x16, x20]\n" "fmls v14.4s, v2.4s, v0.s[3]\n" - "ldr q20, [x16, x9]\n" + "ldr q20, [x16, x10]\n" "fmla v9.4s, v12.4s, v0.s[2]\n" "ldr q3, [x16]\n" "fmls v10.4s, v2.4s, v0.s[2]\n" "ldr q6, [x16, x21]\n" "mov v7.16b, v8.16b\n" - "ldr q16, [x16, x19]\n" + "ldr q16, [x16, x8]\n" "fmls v9.4s, v2.4s, v0.s[2]\n" "ldr q22, [x16, %[input_col_stride1]]\n" "fadd v10.4s, v10.4s, v4.4s\n" "ldr q17, [x17, x20]\n" "fmls v7.4s, v12.4s, v0.s[1]\n" - "ldr q15, [x17, x9]\n" + "ldr q15, [x17, x10]\n" "fsub v9.4s, v9.4s, v4.4s\n" "ldr q19, [x17]\n" "mov v8.16b, v8.16b\n" "ldr q18, [x17, x21]\n" "fsub v7.4s, v7.4s, v2.4s\n" - "ldr q13, [x17, x19]\n" + "ldr q13, [x17, x8]\n" "fmla v7.4s, v4.4s, v0.s[1]\n" "ldr q21, [x17, %[input_col_stride1]]\n" "fmla v8.4s, v12.4s, v0.s[1]\n" @@ -180,25 +179,25 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "mov v25.16b, v19.16b\n" "ldr q11, [x25, x20]\n" "mov v10.16b, v11.16b\n" - "ldr q23, [x25, x9]\n" + "ldr q23, [x25, x10]\n" "mov v9.16b, v11.16b\n" "ldr q7, [x25]\n" "fmla v10.4s, v7.4s, v0.s[2]\n" "ldr q13, [x25, x21]\n" "mov v7.16b, v11.16b\n" - "ldr q31, [x25, x19]\n" + "ldr q31, [x25, x8]\n" "mov v8.16b, v11.16b\n" "ldr q21, [x25, %[input_col_stride1]]\n" "fmls v10.4s, v23.4s, v0.s[3]\n" "ldr q30, [x26, x20]\n" "fmls v9.4s, v21.4s, v0.s[2]\n" - "ldr q29, [x26, x9]\n" + "ldr q29, [x26, x10]\n" "fmla v7.4s, v21.4s, v0.s[2]\n" "ldr q22, [x26]\n" "fmls v8.4s, v21.4s, v0.s[1]\n" "ldr q24, [x26, x21]\n" "fmls v9.4s, v23.4s, v0.s[2]\n" 
- "ldr q27, [x26, x19]\n" + "ldr q27, [x26, x8]\n" "fmls v7.4s, v23.4s, v0.s[2]\n" "ldr q28, [x26, %[input_col_stride1]]\n" "fsub v8.4s, v8.4s, v23.4s\n" @@ -360,13 +359,13 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "add x14, x14, #16\n" "ldr q2, [x27, x20]\n" "mov v4.16b, v2.16b\n" - "ldr q17, [x27, x9]\n" + "ldr q17, [x27, x10]\n" "mov v12.16b, v2.16b\n" "ldr q18, [x27]\n" "fmla v4.4s, v18.4s, v0.s[2]\n" "ldr q3, [x27, x21]\n" "mov v6.16b, v2.16b\n" - "ldr q5, [x27, x19]\n" + "ldr q5, [x27, x8]\n" "mov v1.16b, v2.16b\n" "ldr q18, [x27, %[input_col_stride1]]\n" "fmls v4.4s, v17.4s, v0.s[3]\n" @@ -420,37 +419,37 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "blt 3f\n" "ldr d8, [%[inptr0], x20]\n" "mov v14.16b, v8.16b\n" - "ldr d2, [%[inptr0], x9]\n" + "ldr d2, [%[inptr0], x10]\n" "mov v10.16b, v8.16b\n" "ldr d9, [%[inptr0]]\n" "fmla v14.4s, v9.4s, v0.s[2]\n" "ldr d1, [%[inptr0], x21]\n" "mov v9.16b, v8.16b\n" - "ldr d4, [%[inptr0], x19]\n" + "ldr d4, [%[inptr0], x8]\n" "mov v7.16b, v8.16b\n" "ldr d12, [%[inptr0], %[input_col_stride1]]\n" "fmls v14.4s, v2.4s, v0.s[3]\n" "ldr d5, [x16, x20]\n" "fmls v10.4s, v12.4s, v0.s[2]\n" - "ldr d20, [x16, x9]\n" + "ldr d20, [x16, x10]\n" "fmla v9.4s, v12.4s, v0.s[2]\n" "ldr d3, [x16]\n" "fmls v7.4s, v12.4s, v0.s[1]\n" "ldr d6, [x16, x21]\n" "fmls v10.4s, v2.4s, v0.s[2]\n" - "ldr d16, [x16, x19]\n" + "ldr d16, [x16, x8]\n" "fmls v9.4s, v2.4s, v0.s[2]\n" "ldr d22, [x16, %[input_col_stride1]]\n" "fsub v7.4s, v7.4s, v2.4s\n" "ldr d17, [x17, x20]\n" "fadd v10.4s, v10.4s, v4.4s\n" - "ldr d15, [x17, x9]\n" + "ldr d15, [x17, x10]\n" "fsub v9.4s, v9.4s, v4.4s\n" "ldr d19, [x17]\n" "fmla v7.4s, v4.4s, v0.s[1]\n" "ldr d18, [x17, x21]\n" "mov v8.16b, v8.16b\n" - "ldr d13, [x17, x19]\n" + "ldr d13, [x17, x8]\n" "mov v11.16b, v1.16b\n" "ldr d21, [x17, %[input_col_stride1]]\n" "fmla v8.4s, v12.4s, v0.s[1]\n" @@ -534,25 +533,25 @@ void InputTransform<6, 6, float, 
float, WinogradRoots::Integers>::transform_tile "mov v25.16b, v19.16b\n" "ldr d11, [x25, x20]\n" "mov v10.16b, v11.16b\n" - "ldr d23, [x25, x9]\n" + "ldr d23, [x25, x10]\n" "mov v9.16b, v11.16b\n" "ldr d7, [x25]\n" "fmla v10.4s, v7.4s, v0.s[2]\n" "ldr d13, [x25, x21]\n" "mov v7.16b, v11.16b\n" - "ldr d31, [x25, x19]\n" + "ldr d31, [x25, x8]\n" "mov v8.16b, v11.16b\n" "ldr d21, [x25, %[input_col_stride1]]\n" "fmls v10.4s, v23.4s, v0.s[3]\n" "ldr d30, [x26, x20]\n" "fmls v9.4s, v21.4s, v0.s[2]\n" - "ldr d29, [x26, x9]\n" + "ldr d29, [x26, x10]\n" "fmla v7.4s, v21.4s, v0.s[2]\n" "ldr d22, [x26]\n" "fmls v8.4s, v21.4s, v0.s[1]\n" "ldr d24, [x26, x21]\n" "fmls v9.4s, v23.4s, v0.s[2]\n" - "ldr d27, [x26, x19]\n" + "ldr d27, [x26, x8]\n" "fmls v7.4s, v23.4s, v0.s[2]\n" "ldr d28, [x26, %[input_col_stride1]]\n" "fsub v8.4s, v8.4s, v23.4s\n" @@ -714,13 +713,13 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "add x14, x14, #8\n" "ldr d2, [x27, x20]\n" "mov v4.16b, v2.16b\n" - "ldr d17, [x27, x9]\n" + "ldr d17, [x27, x10]\n" "mov v12.16b, v2.16b\n" "ldr d18, [x27]\n" "fmla v4.4s, v18.4s, v0.s[2]\n" "ldr d3, [x27, x21]\n" "mov v6.16b, v2.16b\n" - "ldr d5, [x27, x19]\n" + "ldr d5, [x27, x8]\n" "mov v1.16b, v2.16b\n" "ldr d18, [x27, %[input_col_stride1]]\n" "fmls v4.4s, v17.4s, v0.s[3]\n" @@ -771,37 +770,37 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "cbz %w[n_channels], 4f\n" "ldr s8, [%[inptr0], x20]\n" "mov v14.16b, v8.16b\n" - "ldr s2, [%[inptr0], x9]\n" + "ldr s2, [%[inptr0], x10]\n" "mov v10.16b, v8.16b\n" "ldr s9, [%[inptr0]]\n" "fmla v14.4s, v9.4s, v0.s[2]\n" "ldr s1, [%[inptr0], x21]\n" "mov v9.16b, v8.16b\n" - "ldr s4, [%[inptr0], x19]\n" + "ldr s4, [%[inptr0], x8]\n" "mov v7.16b, v8.16b\n" "ldr s12, [%[inptr0], %[input_col_stride1]]\n" "fmls v14.4s, v2.4s, v0.s[3]\n" "ldr s5, [x16, x20]\n" "fmls v10.4s, v12.4s, v0.s[2]\n" - "ldr s20, [x16, x9]\n" + "ldr s20, [x16, x10]\n" "fmla v9.4s, v12.4s, 
v0.s[2]\n" "ldr s3, [x16]\n" "fmls v7.4s, v12.4s, v0.s[1]\n" "ldr s6, [x16, x21]\n" "fmls v10.4s, v2.4s, v0.s[2]\n" - "ldr s16, [x16, x19]\n" + "ldr s16, [x16, x8]\n" "fmls v9.4s, v2.4s, v0.s[2]\n" "ldr s22, [x16, %[input_col_stride1]]\n" "fsub v7.4s, v7.4s, v2.4s\n" "ldr s17, [x17, x20]\n" "fadd v10.4s, v10.4s, v4.4s\n" - "ldr s15, [x17, x9]\n" + "ldr s15, [x17, x10]\n" "fsub v9.4s, v9.4s, v4.4s\n" "ldr s19, [x17]\n" "fmla v7.4s, v4.4s, v0.s[1]\n" "ldr s18, [x17, x21]\n" "mov v8.16b, v8.16b\n" - "ldr s13, [x17, x19]\n" + "ldr s13, [x17, x8]\n" "mov v11.16b, v1.16b\n" "ldr s21, [x17, %[input_col_stride1]]\n" "fmla v8.4s, v12.4s, v0.s[1]\n" @@ -885,25 +884,25 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "mov v25.16b, v19.16b\n" "ldr s11, [x25, x20]\n" "mov v10.16b, v11.16b\n" - "ldr s23, [x25, x9]\n" + "ldr s23, [x25, x10]\n" "mov v9.16b, v11.16b\n" "ldr s7, [x25]\n" "fmla v10.4s, v7.4s, v0.s[2]\n" "ldr s13, [x25, x21]\n" "mov v7.16b, v11.16b\n" - "ldr s31, [x25, x19]\n" + "ldr s31, [x25, x8]\n" "mov v8.16b, v11.16b\n" "ldr s21, [x25, %[input_col_stride1]]\n" "fmls v10.4s, v23.4s, v0.s[3]\n" "ldr s30, [x26, x20]\n" "fmls v9.4s, v21.4s, v0.s[2]\n" - "ldr s29, [x26, x9]\n" + "ldr s29, [x26, x10]\n" "fmla v7.4s, v21.4s, v0.s[2]\n" "ldr s22, [x26]\n" "fmls v8.4s, v21.4s, v0.s[1]\n" "ldr s24, [x26, x21]\n" "fmls v9.4s, v23.4s, v0.s[2]\n" - "ldr s27, [x26, x19]\n" + "ldr s27, [x26, x8]\n" "fmls v7.4s, v23.4s, v0.s[2]\n" "ldr s28, [x26, %[input_col_stride1]]\n" "fsub v8.4s, v8.4s, v23.4s\n" @@ -1065,13 +1064,13 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile "add x14, x14, #4\n" "ldr s2, [x27, x20]\n" "mov v4.16b, v2.16b\n" - "ldr s17, [x27, x9]\n" + "ldr s17, [x27, x10]\n" "mov v12.16b, v2.16b\n" "ldr s18, [x27]\n" "fmla v4.4s, v18.4s, v0.s[2]\n" "ldr s3, [x27, x21]\n" "mov v6.16b, v2.16b\n" - "ldr s5, [x27, x19]\n" + "ldr s5, [x27, x8]\n" "mov v1.16b, v2.16b\n" "ldr s18, [x27, 
%[input_col_stride1]]\n" "fmls v4.4s, v17.4s, v0.s[3]\n" @@ -1129,180 +1128,13 @@ void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile : "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8", - "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x9", "x19", + "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x10", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory" ); } -#else // __arm__ not __aarch64__ - -template <> -void InputTransform<6, 6, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, - const float* const input_base, - const int input_row_stride, - const int input_col_stride, - float* outptr, - const int matrix_stride -) -{ - constexpr int inner_tile_rows = 6; - constexpr int inner_tile_cols = 6; - - // Get pointers into the input tile - const float *x_ptrs[inner_tile_rows][inner_tile_cols]; - for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++) - { - // Get a pointer into the row - const float* const row_ptr = input_base + xi*input_row_stride; - - for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++) - { - x_ptrs[i][j] = row_ptr + xj*input_col_stride; - } - } - - // Matrices used/computed in this kernel. - float x[inner_tile_rows][inner_tile_cols]; - float XTx[inner_tile_rows][inner_tile_cols]; - float U[inner_tile_rows][inner_tile_cols]; - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = XTx[i][j] = 0.0f; - } - } - - // Perform the Winograd input transformation for each channel in the input - // tensor. 
- int channels_remaining = n_channels; - for (; channels_remaining >= 2; channels_remaining -= 2) - { - // Matrices used/computed in this kernel - float32x2_t x[inner_tile_rows][inner_tile_cols]; - float32x2_t XTx[inner_tile_rows][inner_tile_cols]; - float32x2_t U[inner_tile_rows][inner_tile_cols]; - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = vdup_n_f32(0.0f); - XTx[i][j] = vdup_n_f32(0.0f); - } - } - - // Read a 6x6 tile in the Winograd domain - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = vld1_f32(x_ptrs[i][j]); - x_ptrs[i][j] += 2; - } - } - - // Compute XT . x - for (int j = 0; j < inner_tile_cols; j++) - { - // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j]; - XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f); - - // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j]; - XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f); - - // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j]; - XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f); - - // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j]; - XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f); - - // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j]; - XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f); - - // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j]; - XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f); - } - - // Compute U = XT . x . 
X - for (int i = 0; i < inner_tile_rows; i++) - { - // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4]; - U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f); - - // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4]; - U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f); - - // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4]; - U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f); - - // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4]; - U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f); - - // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4]; - U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f); - - // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5]; - U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f); - } - - // Store the transformed matrix - for (int i = 0, m = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++, m++) - { - vst1_f32(outptr + m*matrix_stride, U[i][j]); - } - } - outptr += 2; - } - for (; channels_remaining; channels_remaining--) - { - // Load x - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = *(x_ptrs[i][j]++); - } - } - - // Compute XT . x - for (int j = 0; j < inner_tile_cols; j++) - { - XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j]; - XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j]; - XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j]; - XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j]; - XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j]; - XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j]; - } - - // Compute U = XT . x . 
X - for (int i = 0; i < inner_tile_rows; i++) - { - U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4]; - U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4]; - U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4]; - U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4]; - U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4]; - U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5]; - } - - // Store the transformed matrix - for (int i = 0, m = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++, m++) - { - *(outptr + m*matrix_stride) = U[i][j]; - } - } - outptr++; - } -} - -#endif - -template class InputTransform<6, 6, float, float, WinogradRoots::Integers>; - +} // namespace input_transform } // namespace winograd +} // namespace arm_conv + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp index 8f6e9e8b40..3e1fc491f1 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_1x8_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2022-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,20 +22,20 @@ * SOFTWARE. 
*/ -#include "arm.hpp" -#include "input.hpp" +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace input_transform { -template <> -void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, - const float* const input_base, - const int, // We don't need to stride over rows - const int input_col_stride, - float* outptr, - const int matrix_stride +void arm_fp32_1x8( + const unsigned int n_channels, + const float * input_base, + size_t, // We don't need to stride over rows + size_t input_col_stride, + float *outptr, + size_t matrix_stride ) { constexpr int inner_tile_cols = 8; @@ -59,7 +59,6 @@ void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile // Perform the Winograd input transformation for each channel in the input // tensor. int channels_remaining = n_channels; -#ifdef _arm_any_ for (; channels_remaining >= 4; channels_remaining -= 4) { float32x4_t x[inner_tile_cols], U[inner_tile_cols]; @@ -124,7 +123,6 @@ void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile } outptr += 2; } -#endif // _arm_any_ for (; channels_remaining; channels_remaining--) { // Load x @@ -152,7 +150,6 @@ void InputTransform<1, 8, float, float, WinogradRoots::Integers>::transform_tile } } -template class InputTransform<1, 8, float, float, WinogradRoots::Integers>; -template class InputTransform<8, 1, float, float, WinogradRoots::Integers>; - +} // namespace input_transform } // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_4x4.cpp index 69d3e8feb5..a4e6b433c7 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_4x4.cpp @@ 
-1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,20 +22,20 @@ * SOFTWARE. */ -#include "input.hpp" -#include "arm.hpp" +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace input_transform { -template <> -void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, - const float* const input_base, - const int input_row_stride, - const int input_col_stride, - float* outptr, - const int matrix_stride +void arm_fp32_4x4( + const unsigned int n_channels, + const float *input_base, + const size_t input_row_stride, + const size_t input_col_stride, + float *outptr, + const size_t matrix_stride ) { constexpr int inner_tile_rows = 4, inner_tile_cols = 4; @@ -69,7 +69,6 @@ void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile // Perform the Winograd input transformation for each channel in the input // tensor. int channels_remaining = n_channels; -#ifdef __aarch64__ for (; channels_remaining >= 4; channels_remaining -= 4) { // Matrices used/computed in this kernel. @@ -138,8 +137,6 @@ void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile } outptr += 4; } -#endif // __aarch64__ -#ifdef __arm_any__ for (; channels_remaining >= 2; channels_remaining -= 2) { // Matrices used/computed in this kernel. 
@@ -208,7 +205,6 @@ void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile } outptr += 2; } -#endif // __arm_any__ for (; channels_remaining; channels_remaining--) { // Load x @@ -250,6 +246,6 @@ void InputTransform<4, 4, float, float, WinogradRoots::Integers>::transform_tile } } -template class InputTransform<4, 4, float, float, WinogradRoots::Integers>; - -} // namespace +} // namespace input_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp new file mode 100644 index 0000000000..4adc45768e --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_6x6.cpp @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __aarch64__ + +#include <arm_neon.h> +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +void arm_fp32_6x6( + unsigned int n_channels, + const float* const input_base, + const size_t input_row_stride, + const size_t input_col_stride, + float* outptr, + const size_t matrix_stride +) +{ + constexpr int inner_tile_rows = 6; + constexpr int inner_tile_cols = 6; + + // Get pointers into the input tile + const float *x_ptrs[inner_tile_rows][inner_tile_cols]; + for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++) + { + // Get a pointer into the row + const float* const row_ptr = input_base + xi*input_row_stride; + + for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++) + { + x_ptrs[i][j] = row_ptr + xj*input_col_stride; + } + } + + // Matrices used/computed in this kernel. + float x[inner_tile_rows][inner_tile_cols]; + float XTx[inner_tile_rows][inner_tile_cols]; + float U[inner_tile_rows][inner_tile_cols]; + for (int i = 0; i < inner_tile_rows; i++) + { + for (int j = 0; j < inner_tile_cols; j++) + { + x[i][j] = XTx[i][j] = 0.0f; + } + } + + // Perform the Winograd input transformation for each channel in the input + // tensor. + int channels_remaining = n_channels; + for (; channels_remaining >= 2; channels_remaining -= 2) + { + // Matrices used/computed in this kernel + float32x2_t x[inner_tile_rows][inner_tile_cols]; + float32x2_t XTx[inner_tile_rows][inner_tile_cols]; + float32x2_t U[inner_tile_rows][inner_tile_cols]; + for (int i = 0; i < inner_tile_rows; i++) + { + for (int j = 0; j < inner_tile_cols; j++) + { + x[i][j] = vdup_n_f32(0.0f); + XTx[i][j] = vdup_n_f32(0.0f); + } + } + + // Read a 6x6 tile in the Winograd domain + for (int i = 0; i < inner_tile_rows; i++) + { + for (int j = 0; j < inner_tile_cols; j++) + { + x[i][j] = vld1_f32(x_ptrs[i][j]); + x_ptrs[i][j] += 2; + } + } + + // Compute XT . 
x + for (int j = 0; j < inner_tile_cols; j++) + { + // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j]; + XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f); + + // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j]; + XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f); + + // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j]; + XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f); + + // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j]; + XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f); + + // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j]; + XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f); + + // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j]; + XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f); + } + + // Compute U = XT . x . X + for (int i = 0; i < inner_tile_rows; i++) + { + // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4]; + U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f); + + // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4]; + U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f); + + // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4]; + U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f); + + // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4]; + U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f); + + // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4]; + U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f); + + // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5]; + U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f); + } + + // Store the 
transformed matrix + for (int i = 0, m = 0; i < inner_tile_rows; i++) + { + for (int j = 0; j < inner_tile_cols; j++, m++) + { + vst1_f32(outptr + m*matrix_stride, U[i][j]); + } + } + outptr += 2; + } + for (; channels_remaining; channels_remaining--) + { + // Load x + for (int i = 0; i < inner_tile_rows; i++) + { + for (int j = 0; j < inner_tile_cols; j++) + { + x[i][j] = *(x_ptrs[i][j]++); + } + } + + // Compute XT . x + for (int j = 0; j < inner_tile_cols; j++) + { + XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j]; + XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j]; + XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j]; + XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j]; + XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j]; + XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j]; + } + + // Compute U = XT . x . X + for (int i = 0; i < inner_tile_rows; i++) + { + U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4]; + U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4]; + U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4]; + U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4]; + U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4]; + U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5]; + } + + // Store the transformed matrix + for (int i = 0, m = 0; i < inner_tile_rows; i++) + { + for (int j = 0; j < inner_tile_cols; j++, m++) + { + *(outptr + m*matrix_stride) = U[i][j]; + } + } + outptr++; + } +} + +} // namespace input_transform +} // namespace winograd +} // namespace arm_conv + +#endif // ! 
__aarch64__ diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp new file mode 100644 index 0000000000..f446e7ea8b --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp @@ -0,0 +1,363 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#if defined(ARM_COMPUTE_ENABLE_SME) + +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +void sme_fp32_mla_6x6( + const unsigned int num_channels, + const float *input, + const size_t input_row_stride, + const size_t input_col_stride, + float *output, + const size_t output_col_stride +) +{ + const float B_values[4] = { 1.0f, 2.0f, 4.0f, 5.0f }; + long long_channels = num_channels; + + // Generated by armasmgen (February 04th, 2021) + __asm__ __volatile__( + ".inst 0xd503477f // SMSTART ZA\n" + "fmov z16.s, #4.0\n" + "ptrue p1.b\n" + "ld1rqw { z2.s }, p1/Z, [%x[B_values]]\n" + "add x16, %x[input_row_0], %x[input_row_stride], LSL #2\n" + "add x15, %x[output_row_0], %x[output_row_stride], LSL #2\n" + "add x14, %x[input_row_0], %x[input_row_stride], LSL #3\n" + "add x13, %x[output_row_0], %x[output_row_stride], LSL #3\n" + "add x12, x14, %x[input_row_stride], LSL #2\n" + "add x11, x13, %x[output_row_stride], LSL #2\n" + "add x10, %x[input_row_0], %x[input_row_stride], LSL #4\n" + "add x9, %x[output_row_0], %x[output_row_stride], LSL #4\n" + "add x28, x10, %x[input_row_stride], LSL #2\n" + "add x27, x9, %x[output_row_stride], LSL #2\n" + "lsl x26, %x[input_col_1_stride], #0x1\n" + "lsl x25, %x[output_col_1_stride], #0x1\n" + "add x24, x26, %x[input_col_1_stride]\n" + "add x23, x25, %x[output_col_1_stride]\n" + "lsl x22, %x[input_col_1_stride], #0x2\n" + "lsl x21, %x[output_col_1_stride], #0x2\n" + "add x20, x22, %x[input_col_1_stride]\n" + "add x8, x21, %x[output_col_1_stride]\n" + "whilelt p0.s, XZR, %x[num_channels]\n" + "beq 2f\n" + "1:" // channel_loop + "ld1w { z31.s }, p0/Z, [%x[input_row_0]]\n" + "decw %x[num_channels]\n" + "ld1w { z28.s }, p0/Z, [%x[input_row_0], %x[input_col_1_stride], LSL #2]\n" + "fmul z13.s, z28.s, z2.s[1]\n" + "ld1w { z27.s }, p0/Z, [%x[input_row_0], x26, LSL #2]\n" + "ld1w { z11.s }, p0/Z, [%x[input_row_0], x24, LSL #2]\n" + "fneg z13.s, p1/M, z13.s\n" + "ld1w { z7.s }, p0/Z, 
[%x[input_row_0], x22, LSL #2]\n" + "fsub z15.s, z7.s, z27.s\n" + "fmad z31.s, p1/M, z16.s, z7.s\n" + "ld1w { z3.s }, p0/Z, [%x[input_row_0], x20, LSL #2]\n" + "fmla z13.s, z11.s, z2.s[1]\n" + "ld1w { z12.s }, p0/Z, [x14]\n" + "incb %x[input_row_0]\n" + "fmls z31.s, z27.s, z2.s[3]\n" + "ld1w { z14.s }, p0/Z, [x14, %x[input_col_1_stride], LSL #2]\n" + "fsub z25.s, z15.s, z13.s\n" + "fadd z8.s, z13.s, z15.s\n" + "ld1w { z24.s }, p0/Z, [x14, x26, LSL #2]\n" + "fmsb z27.s, p1/M, z16.s, z7.s\n" + "ld1w { z22.s }, p0/Z, [x14, x24, LSL #2]\n" + "fmul z7.s, z28.s, z2.s[2]\n" + "ld1w { z1.s }, p0/Z, [x14, x22, LSL #2]\n" + "fsub z15.s, z1.s, z24.s\n" + "fneg z7.s, p1/M, z7.s\n" + "ld1w { z20.s }, p0/Z, [x14, x20, LSL #2]\n" + "fadd z7.s, z7.s, z11.s\n" + "ld1w { z29.s }, p0/Z, [x10]\n" + "incb x14\n" + "fmad z28.s, p1/M, z16.s, z3.s\n" + "ld1w { z10.s }, p0/Z, [x10, %x[input_col_1_stride], LSL #2]\n" + "fmad z12.s, p1/M, z16.s, z1.s\n" + "ld1w { z18.s }, p0/Z, [x10, x26, LSL #2]\n" + "fmul z13.s, z14.s, z2.s[1]\n" + "ld1w { z19.s }, p0/Z, [x10, x24, LSL #2]\n" + "fadd z17.s, z7.s, z27.s\n" + "ld1w { z9.s }, p0/Z, [x10, x22, LSL #2]\n" + "fsub z27.s, z27.s, z7.s\n" + "fmls z28.s, z11.s, z2.s[3]\n" + "ld1w { z21.s }, p0/Z, [x10, x20, LSL #2]\n" + "incb x10\n" + "fmls z12.s, z24.s, z2.s[3]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z22.s, z2.s[1]\n" + "fsub z30.s, z15.s, z13.s\n" + "fadd z4.s, z13.s, z15.s\n" + "fmsb z24.s, p1/M, z16.s, z1.s\n" + "fsub z15.s, z9.s, z18.s\n" + "fmul z1.s, z14.s, z2.s[2]\n" + "fmad z14.s, p1/M, z16.s, z20.s\n" + "fmad z29.s, p1/M, z16.s, z9.s\n" + "fmul z13.s, z10.s, z2.s[1]\n" + "fneg z1.s, p1/M, z1.s\n" + "fadd z1.s, z1.s, z22.s\n" + "fmls z14.s, z22.s, z2.s[3]\n" + "fmls z29.s, z18.s, z2.s[3]\n" + "fadd z5.s, z1.s, z24.s\n" + "fsub z24.s, z24.s, z1.s\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z19.s, z2.s[1]\n" + "fsub z23.s, z15.s, z13.s\n" + "fadd z11.s, z13.s, z15.s\n" + "fmsb z18.s, p1/M, z16.s, z9.s\n" + "fmul z9.s, z10.s, 
z2.s[2]\n" + "fmad z10.s, p1/M, z16.s, z21.s\n" + "fmad z31.s, p1/M, z16.s, z29.s\n" + "fmad z8.s, p1/M, z16.s, z11.s\n" + "fneg z9.s, p1/M, z9.s\n" + "fadd z9.s, z9.s, z19.s\n" + "fmls z10.s, z19.s, z2.s[3]\n" + "fmls z31.s, z12.s, z2.s[3]\n" + "st1w { z31.s }, p0, [%x[output_row_0]]\n" + "fadd z26.s, z9.s, z18.s\n" + "fsub z18.s, z18.s, z9.s\n" + "fmls z8.s, z4.s, z2.s[3]\n" + "fmad z25.s, p1/M, z16.s, z23.s\n" + "fmad z28.s, p1/M, z16.s, z10.s\n" + "fmad z17.s, p1/M, z16.s, z26.s\n" + "fmad z27.s, p1/M, z16.s, z18.s\n" + "fmls z25.s, z30.s, z2.s[3]\n" + "fmls z28.s, z14.s, z2.s[3]\n" + "fmls z17.s, z5.s, z2.s[3]\n" + "st1w { z17.s }, p0, [%x[output_row_0], %x[output_col_1_stride], LSL #2]\n" + "fmls z27.s, z24.s, z2.s[3]\n" + "st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n" + "st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n" + "st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n" + "st1w { z28.s }, p0, [%x[output_row_0], x8, LSL #2]\n" + "incb %x[output_row_0]\n" + "ld1w { z19.s }, p0/Z, [x16]\n" + "ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n" + "fmul z13.s, z7.s, z2.s[1]\n" + "ld1w { z6.s }, p0/Z, [x16, x26, LSL #2]\n" + "ld1w { z27.s }, p0/Z, [x16, x24, LSL #2]\n" + "fneg z13.s, p1/M, z13.s\n" + "ld1w { z25.s }, p0/Z, [x16, x22, LSL #2]\n" + "fsub z15.s, z25.s, z6.s\n" + "fmad z19.s, p1/M, z16.s, z25.s\n" + "ld1w { z20.s }, p0/Z, [x16, x20, LSL #2]\n" + "fmla z13.s, z27.s, z2.s[1]\n" + "ld1w { z0.s }, p0/Z, [x12]\n" + "incb x16\n" + "fmls z19.s, z6.s, z2.s[3]\n" + "ld1w { z31.s }, p0/Z, [x12, %x[input_col_1_stride], LSL #2]\n" + "fsub z8.s, z15.s, z13.s\n" + "fadd z28.s, z13.s, z15.s\n" + "ld1w { z1.s }, p0/Z, [x12, x26, LSL #2]\n" + "fmsb z6.s, p1/M, z16.s, z25.s\n" + "ld1w { z21.s }, p0/Z, [x12, x24, LSL #2]\n" + "fmul z25.s, z7.s, z2.s[2]\n" + "ld1w { z22.s }, p0/Z, [x12, x22, LSL #2]\n" + "fsub z15.s, z22.s, z1.s\n" + "fneg z25.s, p1/M, z25.s\n" + "ld1w { z17.s }, p0/Z, [x12, x20, LSL #2]\n" + "fadd z25.s, z25.s, z27.s\n" + 
"incb x12\n" + "fmad z7.s, p1/M, z16.s, z20.s\n" + "fmad z0.s, p1/M, z16.s, z22.s\n" + "fmul z13.s, z31.s, z2.s[1]\n" + "fadd z3.s, z25.s, z6.s\n" + "fsub z6.s, z6.s, z25.s\n" + "fmls z7.s, z27.s, z2.s[3]\n" + "fmls z0.s, z1.s, z2.s[3]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z21.s, z2.s[1]\n" + "fsub z9.s, z15.s, z13.s\n" + "fadd z27.s, z13.s, z15.s\n" + "fmsb z1.s, p1/M, z16.s, z22.s\n" + "fsub z15.s, z29.s, z12.s\n" + "fmul z22.s, z31.s, z2.s[2]\n" + "fmad z31.s, p1/M, z16.s, z17.s\n" + "fmul z13.s, z19.s, z2.s[1]\n" + "fmsb z12.s, p1/M, z16.s, z29.s\n" + "fneg z22.s, p1/M, z22.s\n" + "fadd z22.s, z22.s, z21.s\n" + "fmls z31.s, z21.s, z2.s[3]\n" + "fneg z13.s, p1/M, z13.s\n" + "fadd z25.s, z22.s, z1.s\n" + "fsub z1.s, z1.s, z22.s\n" + "fmla z13.s, z0.s, z2.s[1]\n" + "fmul z29.s, z19.s, z2.s[2]\n" + "fadd z22.s, z13.s, z15.s\n" + "st1w { z22.s }, p0, [x11]\n" + "fneg z29.s, p1/M, z29.s\n" + "fsub z22.s, z15.s, z13.s\n" + "fadd z29.s, z29.s, z0.s\n" + "st1w { z22.s }, p0, [x9]\n" + "fadd z22.s, z29.s, z12.s\n" + "fsub z15.s, z26.s, z5.s\n" + "fmul z13.s, z3.s, z2.s[1]\n" + "fsub z12.s, z12.s, z29.s\n" + "fmsb z5.s, p1/M, z16.s, z26.s\n" + "fmul z26.s, z3.s, z2.s[2]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z25.s, z2.s[1]\n" + "fneg z26.s, p1/M, z26.s\n" + "fadd z26.s, z26.s, z25.s\n" + "fadd z21.s, z13.s, z15.s\n" + "st1w { z21.s }, p0, [x11, %x[output_col_1_stride], LSL #2]\n" + "fsub z21.s, z15.s, z13.s\n" + "fmul z13.s, z6.s, z2.s[1]\n" + "fneg z13.s, p1/M, z13.s\n" + "st1w { z21.s }, p0, [x9, %x[output_col_1_stride], LSL #2]\n" + "fadd z21.s, z26.s, z5.s\n" + "fsub z15.s, z18.s, z24.s\n" + "fmla z13.s, z1.s, z2.s[1]\n" + "fsub z5.s, z5.s, z26.s\n" + "fmsb z24.s, p1/M, z16.s, z18.s\n" + "fmul z18.s, z6.s, z2.s[2]\n" + "fadd z20.s, z13.s, z15.s\n" + "st1w { z20.s }, p0, [x11, x25, LSL #2]\n" + "fneg z18.s, p1/M, z18.s\n" + "fsub z20.s, z15.s, z13.s\n" + "fadd z18.s, z18.s, z1.s\n" + "st1w { z20.s }, p0, [x9, x25, LSL #2]\n" + "fadd z20.s, z18.s, 
z24.s\n" + "fsub z15.s, z11.s, z4.s\n" + "fmul z13.s, z28.s, z2.s[1]\n" + "fsub z24.s, z24.s, z18.s\n" + "fmsb z4.s, p1/M, z16.s, z11.s\n" + "fmul z11.s, z28.s, z2.s[2]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z27.s, z2.s[1]\n" + "fneg z11.s, p1/M, z11.s\n" + "fadd z11.s, z11.s, z27.s\n" + "fadd z26.s, z13.s, z15.s\n" + "st1w { z26.s }, p0, [x11, x23, LSL #2]\n" + "fsub z26.s, z15.s, z13.s\n" + "fmul z13.s, z8.s, z2.s[1]\n" + "fneg z13.s, p1/M, z13.s\n" + "st1w { z26.s }, p0, [x9, x23, LSL #2]\n" + "fadd z26.s, z11.s, z4.s\n" + "fsub z15.s, z23.s, z30.s\n" + "fmla z13.s, z9.s, z2.s[1]\n" + "fsub z4.s, z4.s, z11.s\n" + "fmsb z30.s, p1/M, z16.s, z23.s\n" + "fmul z23.s, z8.s, z2.s[2]\n" + "fadd z18.s, z13.s, z15.s\n" + "st1w { z18.s }, p0, [x11, x21, LSL #2]\n" + "fneg z23.s, p1/M, z23.s\n" + "fsub z18.s, z15.s, z13.s\n" + "fadd z23.s, z23.s, z9.s\n" + "st1w { z18.s }, p0, [x9, x21, LSL #2]\n" + "fadd z18.s, z23.s, z30.s\n" + "fsub z15.s, z10.s, z14.s\n" + "fmul z13.s, z7.s, z2.s[1]\n" + "fsub z30.s, z30.s, z23.s\n" + "fmsb z14.s, p1/M, z16.s, z10.s\n" + "fmul z10.s, z7.s, z2.s[2]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z31.s, z2.s[1]\n" + "fneg z10.s, p1/M, z10.s\n" + "fadd z10.s, z10.s, z31.s\n" + "fadd z17.s, z13.s, z15.s\n" + "st1w { z17.s }, p0, [x11, x8, LSL #2]\n" + "fsub z17.s, z15.s, z13.s\n" + "incb x11\n" + "st1w { z17.s }, p0, [x9, x8, LSL #2]\n" + "fadd z17.s, z10.s, z14.s\n" + "fsub z14.s, z14.s, z10.s\n" + "st1w { z22.s }, p0, [x15]\n" + "incb x9\n" + "st1w { z12.s }, p0, [x13]\n" + "st1w { z21.s }, p0, [x15, %x[output_col_1_stride], LSL #2]\n" + "st1w { z5.s }, p0, [x13, %x[output_col_1_stride], LSL #2]\n" + "st1w { z20.s }, p0, [x15, x25, LSL #2]\n" + "st1w { z24.s }, p0, [x13, x25, LSL #2]\n" + "st1w { z26.s }, p0, [x15, x23, LSL #2]\n" + "st1w { z4.s }, p0, [x13, x23, LSL #2]\n" + "st1w { z18.s }, p0, [x15, x21, LSL #2]\n" + "st1w { z30.s }, p0, [x13, x21, LSL #2]\n" + "st1w { z17.s }, p0, [x15, x8, LSL #2]\n" + "incb x15\n" + "st1w { 
z14.s }, p0, [x13, x8, LSL #2]\n" + "incb x13\n" + "ld1w { z23.s }, p0/Z, [x28]\n" + "ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n" + "fmul z13.s, z22.s, z2.s[1]\n" + "ld1w { z21.s }, p0/Z, [x28, x26, LSL #2]\n" + "ld1w { z20.s }, p0/Z, [x28, x24, LSL #2]\n" + "fneg z13.s, p1/M, z13.s\n" + "ld1w { z26.s }, p0/Z, [x28, x22, LSL #2]\n" + "fsub z15.s, z26.s, z21.s\n" + "fmad z23.s, p1/M, z16.s, z26.s\n" + "ld1w { z18.s }, p0/Z, [x28, x20, LSL #2]\n" + "fmla z13.s, z20.s, z2.s[1]\n" + "incb x28\n" + "fmls z23.s, z21.s, z2.s[3]\n" + "fsub z17.s, z15.s, z13.s\n" + "fadd z30.s, z13.s, z15.s\n" + "fmsb z21.s, p1/M, z16.s, z26.s\n" + "fmul z26.s, z22.s, z2.s[2]\n" + "fmad z22.s, p1/M, z16.s, z18.s\n" + "fmad z19.s, p1/M, z16.s, z23.s\n" + "fmad z28.s, p1/M, z16.s, z30.s\n" + "fneg z26.s, p1/M, z26.s\n" + "fadd z26.s, z26.s, z20.s\n" + "fmls z22.s, z20.s, z2.s[3]\n" + "fmls z19.s, z0.s, z2.s[3]\n" + "st1w { z19.s }, p0, [x27]\n" + "fadd z23.s, z26.s, z21.s\n" + "fsub z21.s, z21.s, z26.s\n" + "fmls z28.s, z27.s, z2.s[3]\n" + "fmad z8.s, p1/M, z16.s, z17.s\n" + "fmad z7.s, p1/M, z16.s, z22.s\n" + "fmad z3.s, p1/M, z16.s, z23.s\n" + "fmad z6.s, p1/M, z16.s, z21.s\n" + "fmls z8.s, z9.s, z2.s[3]\n" + "fmls z7.s, z31.s, z2.s[3]\n" + "fmls z3.s, z25.s, z2.s[3]\n" + "st1w { z3.s }, p0, [x27, %x[output_col_1_stride], LSL #2]\n" + "fmls z6.s, z1.s, z2.s[3]\n" + "st1w { z6.s }, p0, [x27, x25, LSL #2]\n" + "st1w { z28.s }, p0, [x27, x23, LSL #2]\n" + "st1w { z8.s }, p0, [x27, x21, LSL #2]\n" + "st1w { z7.s }, p0, [x27, x8, LSL #2]\n" + "incb x27\n" + "whilelt p0.s, XZR, %x[num_channels]\n" + "bne 1b\n" + "2:" // channel_loop_end + ".inst 0xd503467f // SMSTOP\n" + : [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" (output) + : [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 
* (long) output_col_stride) + : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); +} + +} // namespace input_transform +} // namespace winograd +} // namespace arm_conv + +#endif // defined(ARM_COMPUTE_ENABLE_SME) diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp new file mode 100644 index 0000000000..7b387e1247 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +void sve_fp32_6x6( + const unsigned int num_channels, + const float *input, + const size_t input_row_stride, + const size_t input_col_stride, + float *output, + const size_t output_col_stride +) +{ + const float B_values[4] = { 1.0f, 2.0f, 4.0f, 5.0f }; + long long_channels = num_channels; + + // Generated by armasmgen (February 04th, 2021) + __asm__ __volatile__( + "fmov z16.s, #4.0\n" + "ptrue p1.b\n" + "ld1rqw { z2.s }, p1/Z, [%x[B_values]]\n" + "add x16, %x[input_row_0], %x[input_row_stride], LSL #2\n" + "add x15, %x[output_row_0], %x[output_row_stride], LSL #2\n" + "add x14, %x[input_row_0], %x[input_row_stride], LSL #3\n" + "add x13, %x[output_row_0], %x[output_row_stride], LSL #3\n" + "add x12, x14, %x[input_row_stride], LSL #2\n" + "add x11, x13, %x[output_row_stride], LSL #2\n" + "add x10, %x[input_row_0], %x[input_row_stride], LSL #4\n" + "add x9, %x[output_row_0], %x[output_row_stride], LSL #4\n" + "add x28, x10, %x[input_row_stride], LSL #2\n" + "add x27, x9, %x[output_row_stride], LSL #2\n" + "lsl x26, %x[input_col_1_stride], #0x1\n" + "lsl x25, %x[output_col_1_stride], #0x1\n" + "add x24, x26, %x[input_col_1_stride]\n" + "add x23, x25, %x[output_col_1_stride]\n" + "lsl x22, %x[input_col_1_stride], #0x2\n" + "lsl x21, %x[output_col_1_stride], #0x2\n" + "add x20, x22, %x[input_col_1_stride]\n" + "add x8, x21, %x[output_col_1_stride]\n" + "whilelt p0.s, XZR, %x[num_channels]\n" + "beq 2f\n" + "1:" // channel_loop + "ld1w { z31.s }, p0/Z, [%x[input_row_0]]\n" + "decw %x[num_channels]\n" + "ld1w { z28.s }, p0/Z, [%x[input_row_0], 
%x[input_col_1_stride], LSL #2]\n" + "fmul z13.s, z28.s, z2.s[1]\n" + "ld1w { z27.s }, p0/Z, [%x[input_row_0], x26, LSL #2]\n" + "ld1w { z11.s }, p0/Z, [%x[input_row_0], x24, LSL #2]\n" + "fneg z13.s, p1/M, z13.s\n" + "ld1w { z7.s }, p0/Z, [%x[input_row_0], x22, LSL #2]\n" + "fsub z15.s, z7.s, z27.s\n" + "fmad z31.s, p1/M, z16.s, z7.s\n" + "ld1w { z3.s }, p0/Z, [%x[input_row_0], x20, LSL #2]\n" + "fmla z13.s, z11.s, z2.s[1]\n" + "ld1w { z12.s }, p0/Z, [x14]\n" + "incb %x[input_row_0]\n" + "fmls z31.s, z27.s, z2.s[3]\n" + "ld1w { z14.s }, p0/Z, [x14, %x[input_col_1_stride], LSL #2]\n" + "fsub z25.s, z15.s, z13.s\n" + "fadd z8.s, z13.s, z15.s\n" + "ld1w { z24.s }, p0/Z, [x14, x26, LSL #2]\n" + "fmsb z27.s, p1/M, z16.s, z7.s\n" + "ld1w { z22.s }, p0/Z, [x14, x24, LSL #2]\n" + "fmul z7.s, z28.s, z2.s[2]\n" + "ld1w { z1.s }, p0/Z, [x14, x22, LSL #2]\n" + "fsub z15.s, z1.s, z24.s\n" + "fneg z7.s, p1/M, z7.s\n" + "ld1w { z20.s }, p0/Z, [x14, x20, LSL #2]\n" + "fadd z7.s, z7.s, z11.s\n" + "ld1w { z29.s }, p0/Z, [x10]\n" + "incb x14\n" + "fmad z28.s, p1/M, z16.s, z3.s\n" + "ld1w { z10.s }, p0/Z, [x10, %x[input_col_1_stride], LSL #2]\n" + "fmad z12.s, p1/M, z16.s, z1.s\n" + "ld1w { z18.s }, p0/Z, [x10, x26, LSL #2]\n" + "fmul z13.s, z14.s, z2.s[1]\n" + "ld1w { z19.s }, p0/Z, [x10, x24, LSL #2]\n" + "fadd z17.s, z7.s, z27.s\n" + "ld1w { z9.s }, p0/Z, [x10, x22, LSL #2]\n" + "fsub z27.s, z27.s, z7.s\n" + "fmls z28.s, z11.s, z2.s[3]\n" + "ld1w { z21.s }, p0/Z, [x10, x20, LSL #2]\n" + "incb x10\n" + "fmls z12.s, z24.s, z2.s[3]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z22.s, z2.s[1]\n" + "fsub z30.s, z15.s, z13.s\n" + "fadd z4.s, z13.s, z15.s\n" + "fmsb z24.s, p1/M, z16.s, z1.s\n" + "fsub z15.s, z9.s, z18.s\n" + "fmul z1.s, z14.s, z2.s[2]\n" + "fmad z14.s, p1/M, z16.s, z20.s\n" + "fmad z29.s, p1/M, z16.s, z9.s\n" + "fmul z13.s, z10.s, z2.s[1]\n" + "fneg z1.s, p1/M, z1.s\n" + "fadd z1.s, z1.s, z22.s\n" + "fmls z14.s, z22.s, z2.s[3]\n" + "fmls z29.s, z18.s, z2.s[3]\n" + 
"fadd z5.s, z1.s, z24.s\n" + "fsub z24.s, z24.s, z1.s\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z19.s, z2.s[1]\n" + "fsub z23.s, z15.s, z13.s\n" + "fadd z11.s, z13.s, z15.s\n" + "fmsb z18.s, p1/M, z16.s, z9.s\n" + "fmul z9.s, z10.s, z2.s[2]\n" + "fmad z10.s, p1/M, z16.s, z21.s\n" + "fmad z31.s, p1/M, z16.s, z29.s\n" + "fmad z8.s, p1/M, z16.s, z11.s\n" + "fneg z9.s, p1/M, z9.s\n" + "fadd z9.s, z9.s, z19.s\n" + "fmls z10.s, z19.s, z2.s[3]\n" + "fmls z31.s, z12.s, z2.s[3]\n" + "st1w { z31.s }, p0, [%x[output_row_0]]\n" + "fadd z26.s, z9.s, z18.s\n" + "fsub z18.s, z18.s, z9.s\n" + "fmls z8.s, z4.s, z2.s[3]\n" + "fmad z25.s, p1/M, z16.s, z23.s\n" + "fmad z28.s, p1/M, z16.s, z10.s\n" + "fmad z17.s, p1/M, z16.s, z26.s\n" + "fmad z27.s, p1/M, z16.s, z18.s\n" + "fmls z25.s, z30.s, z2.s[3]\n" + "fmls z28.s, z14.s, z2.s[3]\n" + "fmls z17.s, z5.s, z2.s[3]\n" + "st1w { z17.s }, p0, [%x[output_row_0], %x[output_col_1_stride], LSL #2]\n" + "fmls z27.s, z24.s, z2.s[3]\n" + "st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n" + "st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n" + "st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n" + "st1w { z28.s }, p0, [%x[output_row_0], x8, LSL #2]\n" + "incb %x[output_row_0]\n" + "ld1w { z19.s }, p0/Z, [x16]\n" + "ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n" + "fmul z13.s, z7.s, z2.s[1]\n" + "ld1w { z6.s }, p0/Z, [x16, x26, LSL #2]\n" + "ld1w { z27.s }, p0/Z, [x16, x24, LSL #2]\n" + "fneg z13.s, p1/M, z13.s\n" + "ld1w { z25.s }, p0/Z, [x16, x22, LSL #2]\n" + "fsub z15.s, z25.s, z6.s\n" + "fmad z19.s, p1/M, z16.s, z25.s\n" + "ld1w { z20.s }, p0/Z, [x16, x20, LSL #2]\n" + "fmla z13.s, z27.s, z2.s[1]\n" + "ld1w { z0.s }, p0/Z, [x12]\n" + "incb x16\n" + "fmls z19.s, z6.s, z2.s[3]\n" + "ld1w { z31.s }, p0/Z, [x12, %x[input_col_1_stride], LSL #2]\n" + "fsub z8.s, z15.s, z13.s\n" + "fadd z28.s, z13.s, z15.s\n" + "ld1w { z1.s }, p0/Z, [x12, x26, LSL #2]\n" + "fmsb z6.s, p1/M, z16.s, z25.s\n" + "ld1w { z21.s }, p0/Z, 
[x12, x24, LSL #2]\n" + "fmul z25.s, z7.s, z2.s[2]\n" + "ld1w { z22.s }, p0/Z, [x12, x22, LSL #2]\n" + "fsub z15.s, z22.s, z1.s\n" + "fneg z25.s, p1/M, z25.s\n" + "ld1w { z17.s }, p0/Z, [x12, x20, LSL #2]\n" + "fadd z25.s, z25.s, z27.s\n" + "incb x12\n" + "fmad z7.s, p1/M, z16.s, z20.s\n" + "fmad z0.s, p1/M, z16.s, z22.s\n" + "fmul z13.s, z31.s, z2.s[1]\n" + "fadd z3.s, z25.s, z6.s\n" + "fsub z6.s, z6.s, z25.s\n" + "fmls z7.s, z27.s, z2.s[3]\n" + "fmls z0.s, z1.s, z2.s[3]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z21.s, z2.s[1]\n" + "fsub z9.s, z15.s, z13.s\n" + "fadd z27.s, z13.s, z15.s\n" + "fmsb z1.s, p1/M, z16.s, z22.s\n" + "fsub z15.s, z29.s, z12.s\n" + "fmul z22.s, z31.s, z2.s[2]\n" + "fmad z31.s, p1/M, z16.s, z17.s\n" + "fmul z13.s, z19.s, z2.s[1]\n" + "fmsb z12.s, p1/M, z16.s, z29.s\n" + "fneg z22.s, p1/M, z22.s\n" + "fadd z22.s, z22.s, z21.s\n" + "fmls z31.s, z21.s, z2.s[3]\n" + "fneg z13.s, p1/M, z13.s\n" + "fadd z25.s, z22.s, z1.s\n" + "fsub z1.s, z1.s, z22.s\n" + "fmla z13.s, z0.s, z2.s[1]\n" + "fmul z29.s, z19.s, z2.s[2]\n" + "fadd z22.s, z13.s, z15.s\n" + "st1w { z22.s }, p0, [x11]\n" + "fneg z29.s, p1/M, z29.s\n" + "fsub z22.s, z15.s, z13.s\n" + "fadd z29.s, z29.s, z0.s\n" + "st1w { z22.s }, p0, [x9]\n" + "fadd z22.s, z29.s, z12.s\n" + "fsub z15.s, z26.s, z5.s\n" + "fmul z13.s, z3.s, z2.s[1]\n" + "fsub z12.s, z12.s, z29.s\n" + "fmsb z5.s, p1/M, z16.s, z26.s\n" + "fmul z26.s, z3.s, z2.s[2]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z25.s, z2.s[1]\n" + "fneg z26.s, p1/M, z26.s\n" + "fadd z26.s, z26.s, z25.s\n" + "fadd z21.s, z13.s, z15.s\n" + "st1w { z21.s }, p0, [x11, %x[output_col_1_stride], LSL #2]\n" + "fsub z21.s, z15.s, z13.s\n" + "fmul z13.s, z6.s, z2.s[1]\n" + "fneg z13.s, p1/M, z13.s\n" + "st1w { z21.s }, p0, [x9, %x[output_col_1_stride], LSL #2]\n" + "fadd z21.s, z26.s, z5.s\n" + "fsub z15.s, z18.s, z24.s\n" + "fmla z13.s, z1.s, z2.s[1]\n" + "fsub z5.s, z5.s, z26.s\n" + "fmsb z24.s, p1/M, z16.s, z18.s\n" + "fmul z18.s, z6.s, 
z2.s[2]\n" + "fadd z20.s, z13.s, z15.s\n" + "st1w { z20.s }, p0, [x11, x25, LSL #2]\n" + "fneg z18.s, p1/M, z18.s\n" + "fsub z20.s, z15.s, z13.s\n" + "fadd z18.s, z18.s, z1.s\n" + "st1w { z20.s }, p0, [x9, x25, LSL #2]\n" + "fadd z20.s, z18.s, z24.s\n" + "fsub z15.s, z11.s, z4.s\n" + "fmul z13.s, z28.s, z2.s[1]\n" + "fsub z24.s, z24.s, z18.s\n" + "fmsb z4.s, p1/M, z16.s, z11.s\n" + "fmul z11.s, z28.s, z2.s[2]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z27.s, z2.s[1]\n" + "fneg z11.s, p1/M, z11.s\n" + "fadd z11.s, z11.s, z27.s\n" + "fadd z26.s, z13.s, z15.s\n" + "st1w { z26.s }, p0, [x11, x23, LSL #2]\n" + "fsub z26.s, z15.s, z13.s\n" + "fmul z13.s, z8.s, z2.s[1]\n" + "fneg z13.s, p1/M, z13.s\n" + "st1w { z26.s }, p0, [x9, x23, LSL #2]\n" + "fadd z26.s, z11.s, z4.s\n" + "fsub z15.s, z23.s, z30.s\n" + "fmla z13.s, z9.s, z2.s[1]\n" + "fsub z4.s, z4.s, z11.s\n" + "fmsb z30.s, p1/M, z16.s, z23.s\n" + "fmul z23.s, z8.s, z2.s[2]\n" + "fadd z18.s, z13.s, z15.s\n" + "st1w { z18.s }, p0, [x11, x21, LSL #2]\n" + "fneg z23.s, p1/M, z23.s\n" + "fsub z18.s, z15.s, z13.s\n" + "fadd z23.s, z23.s, z9.s\n" + "st1w { z18.s }, p0, [x9, x21, LSL #2]\n" + "fadd z18.s, z23.s, z30.s\n" + "fsub z15.s, z10.s, z14.s\n" + "fmul z13.s, z7.s, z2.s[1]\n" + "fsub z30.s, z30.s, z23.s\n" + "fmsb z14.s, p1/M, z16.s, z10.s\n" + "fmul z10.s, z7.s, z2.s[2]\n" + "fneg z13.s, p1/M, z13.s\n" + "fmla z13.s, z31.s, z2.s[1]\n" + "fneg z10.s, p1/M, z10.s\n" + "fadd z10.s, z10.s, z31.s\n" + "fadd z17.s, z13.s, z15.s\n" + "st1w { z17.s }, p0, [x11, x8, LSL #2]\n" + "fsub z17.s, z15.s, z13.s\n" + "incb x11\n" + "st1w { z17.s }, p0, [x9, x8, LSL #2]\n" + "fadd z17.s, z10.s, z14.s\n" + "fsub z14.s, z14.s, z10.s\n" + "st1w { z22.s }, p0, [x15]\n" + "incb x9\n" + "st1w { z12.s }, p0, [x13]\n" + "st1w { z21.s }, p0, [x15, %x[output_col_1_stride], LSL #2]\n" + "st1w { z5.s }, p0, [x13, %x[output_col_1_stride], LSL #2]\n" + "st1w { z20.s }, p0, [x15, x25, LSL #2]\n" + "st1w { z24.s }, p0, [x13, x25, LSL #2]\n" + 
"st1w { z26.s }, p0, [x15, x23, LSL #2]\n" + "st1w { z4.s }, p0, [x13, x23, LSL #2]\n" + "st1w { z18.s }, p0, [x15, x21, LSL #2]\n" + "st1w { z30.s }, p0, [x13, x21, LSL #2]\n" + "st1w { z17.s }, p0, [x15, x8, LSL #2]\n" + "incb x15\n" + "st1w { z14.s }, p0, [x13, x8, LSL #2]\n" + "incb x13\n" + "ld1w { z23.s }, p0/Z, [x28]\n" + "ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n" + "fmul z13.s, z22.s, z2.s[1]\n" + "ld1w { z21.s }, p0/Z, [x28, x26, LSL #2]\n" + "ld1w { z20.s }, p0/Z, [x28, x24, LSL #2]\n" + "fneg z13.s, p1/M, z13.s\n" + "ld1w { z26.s }, p0/Z, [x28, x22, LSL #2]\n" + "fsub z15.s, z26.s, z21.s\n" + "fmad z23.s, p1/M, z16.s, z26.s\n" + "ld1w { z18.s }, p0/Z, [x28, x20, LSL #2]\n" + "fmla z13.s, z20.s, z2.s[1]\n" + "incb x28\n" + "fmls z23.s, z21.s, z2.s[3]\n" + "fsub z17.s, z15.s, z13.s\n" + "fadd z30.s, z13.s, z15.s\n" + "fmsb z21.s, p1/M, z16.s, z26.s\n" + "fmul z26.s, z22.s, z2.s[2]\n" + "fmad z22.s, p1/M, z16.s, z18.s\n" + "fmad z19.s, p1/M, z16.s, z23.s\n" + "fmad z28.s, p1/M, z16.s, z30.s\n" + "fneg z26.s, p1/M, z26.s\n" + "fadd z26.s, z26.s, z20.s\n" + "fmls z22.s, z20.s, z2.s[3]\n" + "fmls z19.s, z0.s, z2.s[3]\n" + "st1w { z19.s }, p0, [x27]\n" + "fadd z23.s, z26.s, z21.s\n" + "fsub z21.s, z21.s, z26.s\n" + "fmls z28.s, z27.s, z2.s[3]\n" + "fmad z8.s, p1/M, z16.s, z17.s\n" + "fmad z7.s, p1/M, z16.s, z22.s\n" + "fmad z3.s, p1/M, z16.s, z23.s\n" + "fmad z6.s, p1/M, z16.s, z21.s\n" + "fmls z8.s, z9.s, z2.s[3]\n" + "fmls z7.s, z31.s, z2.s[3]\n" + "fmls z3.s, z25.s, z2.s[3]\n" + "st1w { z3.s }, p0, [x27, %x[output_col_1_stride], LSL #2]\n" + "fmls z6.s, z1.s, z2.s[3]\n" + "st1w { z6.s }, p0, [x27, x25, LSL #2]\n" + "st1w { z28.s }, p0, [x27, x23, LSL #2]\n" + "st1w { z8.s }, p0, [x27, x21, LSL #2]\n" + "st1w { z7.s }, p0, [x27, x8, LSL #2]\n" + "incb x27\n" + "whilelt p0.s, XZR, %x[num_channels]\n" + "bne 1b\n" + "2:" // channel_loop_end + + : [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" 
(output) + : [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 * (long) output_col_stride) + : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); +} + +} // namespace input_transform +} // namespace winograd +} // namespace arm_conv + +#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_fp16.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms_fp16.cpp index 99f0f53792..35d61fa94d 100644 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_fp16.cpp +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms_fp16.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,14 +21,36 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "impl_fp16_fp16.hpp" -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -namespace depthwise +#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +#include "input_transform.hpp" +#include "winograd_implementations.hpp" + +#include <memory> +#include <string> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +void a64_fp16_6x6(unsigned int, const __fp16 *, size_t, size_t, __fp16 *, size_t); + +#define IMPL(HEIGHT, WIDTH, FUNC, DRIVER) new Transform ## DRIVER <__fp16, __fp16>(#FUNC, HEIGHT, WIDTH, FUNC) + +static const TransformImplementation<__fp16> transforms_fp16[] = { + { IMPL(6, 6, a64_fp16_6x6, Unpadded) }, + { nullptr }, +}; + +template <> +const TransformImplementation<__fp16> *implementation_list(void) { -template class DepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>; -template class DepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>; -template class DepthwiseConvolution<3, 3, 5, 5, 1, 1, float16_t, float16_t, float16_t>; -template class DepthwiseConvolution<3, 3, 5, 5, 2, 2, float16_t, float16_t, float16_t>; -} // namespace depthwise -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + return transforms_fp16; +} + +} // namespace input_transform +} // namespace winograd +} // namespace arm_conv + +#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp new file mode 100644 index 0000000000..df633903ca --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/input_transforms_fp32.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "input_transform.hpp" +#include "winograd_implementations.hpp" + +#include <memory> +#include <string> + +namespace arm_conv { +namespace winograd { +namespace input_transform { + +#if defined(__aarch64__) +#if defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(ARM_COMPUTE_ENABLE_SME) +void sme_fp32_mla_6x6(unsigned int, const float *, size_t, size_t, float *, size_t); +#endif // defined(ARM_COMPUTE_ENABLE_SME) +void sve_fp32_6x6(unsigned int, const float *, size_t, size_t, float *, size_t); +#endif // defined(ARM_COMPUTE_ENABLE_SVE) +void a64_fp32_6x6(unsigned int, const float *, size_t, size_t, float *, size_t); +#else // defined(__aarch64__) +void arm_fp32_6x6(unsigned int, const float *, size_t, size_t, float *, size_t); +#endif // defined(__aarch64__) +void arm_fp32_4x4(unsigned int, const float *, size_t, size_t, float *, size_t); +void arm_fp32_1x8(unsigned int, const float *, size_t, size_t, float *, size_t); + +#define IMPL(HEIGHT, WIDTH, FUNC, DRIVER) new Transform ## DRIVER <float, float>(#FUNC, HEIGHT, WIDTH, FUNC) + +static const TransformImplementation<float> transforms_fp32[] = { +#if defined(__aarch64__) +#if defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(ARM_COMPUTE_ENABLE_SME) + { IMPL(6, 6, sme_fp32_mla_6x6, Unpadded), MethodConstraints::RequiresSME }, +#endif // defined(ARM_COMPUTE_ENABLE_SME) + { IMPL(6, 6, sve_fp32_6x6, Unpadded), MethodConstraints::RequiresSVE }, +#endif // defined(ARM_COMPUTE_ENABLE_SVE) + { IMPL(6, 6, a64_fp32_6x6, Unpadded) }, +#else // defined(__aarch64__) + { IMPL(6, 6, arm_fp32_6x6, Unpadded) }, +#endif // defined(__aarch64__) + { IMPL(4, 4, arm_fp32_4x4, Unpadded) }, + { IMPL(1, 8, arm_fp32_1x8, Unpadded) }, + { new TransformUnpadded<float, float>("arm_fp32_1x8", 8, 1, TransformUnpadded<float, float>::get_transposed_kernel(arm_fp32_1x8)) }, + { nullptr }, +}; + +template <> +const TransformImplementation<float> *implementation_list(void) +{ + return transforms_fp32; +} + +} // namespace input_transform +} // 
namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/output_transform.hpp b/src/core/NEON/kernels/convolution/winograd/output_transform.hpp new file mode 100644 index 0000000000..971cc99cd2 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/output_transform.hpp @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once + +#include "winograd.hpp" + +#include "src/core/NEON/kernels/arm_conv/addressing.hpp" + +#include <algorithm> +#include <cstring> +#include <functional> +#include <limits> + +namespace arm_conv { +namespace winograd { +namespace output_transform { + +/* Driver class for the Winograd output transforms. 
+ * + * This provides a base implementation which handles iteration over the output + * tensor; subclasses are responsible for managing working space and executing + * the transform on individual tiles. + */ +template <typename TIn, typename TOut=TIn> +class TransformBase : public ITransform +{ + const std::string m_name; + const unsigned int m_output_rows, m_output_cols; + const unsigned int m_kernel_rows, m_kernel_cols; + + protected: + virtual size_t get_working_space_per_thread(const ConvolutionArgs &) const + { + return 0; + } + + virtual void initialise_thread_working_space(const ConvolutionArgs &, void *) const + { + // Nothing to do + } + + virtual void execute_tile( + unsigned int n_channels, + const TIn *inptr, size_t ld_in_matrix, + const TIn *bias, + TOut *outptr, size_t ld_out_row, size_t ld_out_col, + TOut activation_min, TOut activation_max, + unsigned int valid_rows, unsigned int valid_cols, + void *working_space + ) const = 0; + + void execute_internal( + const ConvolutionArgs &args, + const TIn *inptr, size_t ld_in_batch, size_t ld_in_matrix, size_t ld_in_row, + const TIn *bias, + TOut *outptr, size_t ld_out_batch, size_t ld_out_row, size_t ld_out_col, + void *working_space, unsigned int thread_id, unsigned int n_threads + ) const + { + // Get the working space for this thread, and initialise it. 
+ working_space = reinterpret_cast<char *>(working_space) + + this->get_working_space_per_thread(args) * thread_id; + this->initialise_thread_working_space(args, working_space); + + // Get the activation values + auto activation_min = static_cast<TOut>(-std::numeric_limits<float>::infinity()); + auto activation_max = static_cast<TOut>(+std::numeric_limits<float>::infinity()); + switch (args.activation.type) + { + case arm_gemm::Activation::Type::BoundedReLU: + activation_max = static_cast<TOut>(args.activation.param1); + // Fall through + case arm_gemm::Activation::Type::ReLU: + activation_min = static_cast<TOut>(0); + break; + default: + break; + } + + // Determine the number of tiles in a row, we use this to get the right + // offset into the input data. + const auto n_tile_cols = (args.output_shape.cols + this->get_output_cols() - 1) / this->get_output_cols(); + + // Execute over all batches + for (unsigned int batch = 0; batch < args.n_batches; batch++) + { + auto inptr_row = inptr + thread_id*n_tile_cols*ld_in_row; + auto outptr_row = outptr + thread_id*ld_out_row*this->get_output_rows(); + inptr += ld_in_batch; + outptr += ld_out_batch; + + // Stripe rows of tiles over threads. 
+ for (auto out_i = thread_id * this->get_output_rows(); + out_i < args.output_shape.rows; + out_i += n_threads * this->get_output_rows()) + { + auto inptr_tile = inptr_row; + auto outptr_tile = outptr_row; + inptr_row += n_threads * n_tile_cols * ld_in_row; + outptr_row += n_threads * this->get_output_rows() * ld_out_row; + + // Iterate over all columns + for (auto out_j = 0u; out_j < args.output_shape.cols; + out_j += this->get_output_cols()) + { + // Execute the tile + this->execute_tile( + args.n_output_channels, + inptr_tile, ld_in_matrix, + bias, + outptr_tile, ld_out_row, ld_out_col, + activation_min, activation_max, + args.output_shape.rows - out_i, // Number of valid rows remaining + args.output_shape.cols - out_j, // Number of valid columns remaining + working_space + ); + + // Progress the pointers + inptr_tile += ld_in_row; + outptr_tile += this->get_output_cols() * ld_out_col; + } + } + } + } + + public: + TransformBase(const std::string &name, + unsigned int output_rows, unsigned int output_cols, + unsigned int kernel_rows, unsigned int kernel_cols) + : m_name(name), + m_output_rows(output_rows), m_output_cols(output_cols), + m_kernel_rows(kernel_rows), m_kernel_cols(kernel_cols) + { + } + + const std::string &get_name(void) const override { return m_name; } + + unsigned int get_input_rows(void) const override final { return m_kernel_rows + m_output_rows - 1; } + unsigned int get_input_cols(void) const override final { return m_kernel_cols + m_output_cols - 1; } + + unsigned int get_output_rows(void) const override final { return m_output_rows; } + unsigned int get_output_cols(void) const override final { return m_output_cols; } + + unsigned int get_kernel_rows(void) const override final { return m_kernel_rows; } + unsigned int get_kernel_cols(void) const override final { return m_kernel_cols; } + + size_t get_working_space_size(const ConvolutionArgs &args, unsigned int n_threads) const override + { + return n_threads * 
this->get_working_space_per_thread(args); + } + + void execute( + const ConvolutionArgs &args, + const void *inptr, size_t ld_in_batch, size_t ld_in_matrix, size_t ld_in_row, + const void *bias, + void *outptr, size_t ld_out_batch, size_t ld_out_row, size_t ld_out_col, + void *working_space, unsigned int thread_id, unsigned int n_threads + ) const override + { + execute_internal( + args, + reinterpret_cast<const TIn *>(inptr), ld_in_batch, ld_in_matrix, ld_in_row, + reinterpret_cast<const TIn *>(bias), + reinterpret_cast<TOut *>(outptr), ld_out_batch, ld_out_row, ld_out_col, + working_space, thread_id, n_threads + ); + } +}; + +template <typename TIn, typename TOut=TIn> +class TransformUnpadded : public TransformBase<TIn, TOut> +{ + using Kernel = std::function<void( + unsigned int n_channels, + const TIn *inptr, size_t ld_in_matrix, + const TIn *bias, + TOut *outptr, size_t ld_out_row, size_t ld_out_col, + TOut activation_min, TOut activation_max + )>; + const Kernel m_kernel; + + protected: + size_t get_working_space_per_thread(const ConvolutionArgs &args) const override + { + // We create a buffer the size of the output tile + const auto n_output_points = this->get_output_rows() * this->get_output_cols(); + return sizeof(TOut) * n_output_points * args.n_output_channels; + } + + void execute_tile( + unsigned int n_channels, + const TIn *inptr, size_t ld_in_matrix, + const TIn *bias, + TOut *outptr, size_t ld_out_row, size_t ld_out_col, + TOut activation_min, TOut activation_max, + unsigned int valid_rows, unsigned int valid_cols, + void *working_space + ) const override final + { + // Get copies of the output tensor parameters + auto kernel_outptr = outptr; + auto kernel_ld_out_row = ld_out_row, kernel_ld_out_col = ld_out_col; + + // If there's padding on either the left or the right, then we execute the + // kernel into the output buffer and then perform a copy. 
+ if (valid_rows < this->get_output_rows() || + valid_cols < this->get_output_cols()) + { + // Override the kernel output parameters + kernel_outptr = reinterpret_cast<TOut *>(working_space); + kernel_ld_out_col = n_channels; + kernel_ld_out_row = kernel_ld_out_col * this->get_output_cols(); + } + + // Execute the kernel + m_kernel( + n_channels, + inptr, ld_in_matrix, + bias, + kernel_outptr, kernel_ld_out_row, kernel_ld_out_col, + activation_min, activation_max + ); + + // If necessary, copy from the working space into the destination tensor. + if (valid_rows < this->get_output_rows() || + valid_cols < this->get_output_cols()) + { + const auto last_row = std::min(valid_rows, this->get_output_rows()); + const auto last_col = std::min(valid_cols, this->get_output_cols()); + + for (auto i = 0u; i < last_row; i++) + { + auto patch_tile = kernel_outptr; + auto out_tile = outptr; + kernel_outptr += kernel_ld_out_row; + outptr += ld_out_row; + + for (auto j = 0u; j < last_col; j++) + { + memcpy(out_tile, patch_tile, sizeof(TOut) * n_channels); + patch_tile += kernel_ld_out_col; + out_tile += ld_out_col; + } + } + } + } + + public: + TransformUnpadded(const std::string &name, + unsigned int output_rows, unsigned int output_cols, + unsigned int kernel_rows, unsigned int kernel_cols, + const Kernel kernel) + : TransformBase<TIn, TOut>(name, output_rows, output_cols, kernel_rows, kernel_cols), + m_kernel(kernel) + { + } + + /* Utility method to get a transposed variant of a kernel, this transposed + * version simply calls the original kernel with the output row and column + * strides swapped. 
+ */ + static constexpr Kernel get_transposed_kernel(const Kernel &kernel) + { + return [kernel] ( + const unsigned int n_channels, + const TIn *const inptr, const size_t ld_in_matrix, + const TIn *const bias, + TOut *const outptr, const size_t ld_out_row, const size_t ld_out_col, + const TOut activation_min, const TOut activation_max + ) { + kernel(n_channels, inptr, ld_in_matrix, bias, + outptr, ld_out_col, ld_out_row, + activation_min, activation_max); + }; + } +}; + +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp index 3c071bdac6..295005a2ee 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp16_fp16_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2022, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,25 +22,29 @@ * SOFTWARE. 
*/ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -#include "arm.hpp" -#include "output.hpp" -namespace winograd -{ +#include <algorithm> +#include <arm_neon.h> +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void winograd::OutputTransform<3, 3, 6, 6, __fp16, __fp16, winograd::WinogradRoots::Integers>::transform_tile( - const int n_channels, +void a64_fp16_4x4_3x3( + unsigned int n_channels, const __fp16* inptr, - const int matrix_stride, + size_t matrix_stride, const __fp16* bptr, __fp16* const output, - const int output_row_stride, - const int output_col_stride, - const __fp16 output_min, - const __fp16 output_max + size_t output_row_stride, + size_t output_col_stride, + __fp16 output_min, + __fp16 output_max ) { + constexpr int output_tile_rows = 4, output_tile_cols = 4; + // Construct a map to the output cells __fp16 *outptrs[output_tile_rows][output_tile_cols]; for (int i = 0; i < output_tile_rows; i++) @@ -249,7 +253,8 @@ void winograd::OutputTransform<3, 3, 6, 6, __fp16, __fp16, winograd::WinogradRoo } } -template class OutputTransform<3, 3, 6, 6, __fp16, __fp16, winograd::WinogradRoots::Integers>; +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv -} // namespace winograd #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp index 8e257909a3..8c6cf9725e 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2_7_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2022-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,42 +22,36 @@ * SOFTWARE. 
*/ -#include "arm.hpp" -#include "output.hpp" +#include <algorithm> +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, +void arm_fp32_1x2_1x7( + unsigned int n_channels, const float* inptr, - const int matrix_stride, + size_t matrix_stride, const float* bptr, - float* const output, - const int, // No need to stride across rows - const int output_col_stride, - const float output_min, - const float output_max + float *outptr, + size_t, // No need to stride across rows + size_t output_col_stride, + float output_min, + float output_max ) { - // Construct a map to the output cells - float *outptrs[output_tile_cols]; - for (int j = 0; j < output_tile_cols; j++) - { - outptrs[j] = output + j*output_col_stride; - } + constexpr auto inner_tile_cols = 8u, output_tile_cols = 2u; // For each channel of the output - int channels_remaining = n_channels; -#ifdef __arm_any__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed during this transform float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f); // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = vld1q_f32(inptr + j*matrix_stride); } @@ -72,21 +66,21 @@ void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transfo b = vld1q_f32(bptr); bptr += 4; } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vminq_f32(vmaxq_f32(f[j] + b, vdupq_n_f32(output_min)), vdupq_n_f32(output_max)); - vst1q_f32(outptrs[j], y); - outptrs[j] += 4; + vst1q_f32(outptr + j*output_col_stride, y); } + outptr += 4; } - for (; channels_remaining >= 2; channels_remaining -= 2) + 
for (; n_channels >= 2; n_channels -= 2) { // Matrices used and computed during this transform float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f); // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = vld1_f32(inptr + j*matrix_stride); } @@ -101,26 +95,24 @@ void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transfo b = vld1_f32(bptr); bptr += 2; } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmin_f32(vmax_f32(f[j] + b, vdup_n_f32(output_min)), vdup_n_f32(output_max)); - vst1_f32(outptrs[j], y); - outptrs[j] += 2; + vst1_f32(outptr + j*output_col_stride, y); } + outptr += 2; } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + if (n_channels) { // Matrices used and computed during this transform float F[inner_tile_cols], f[output_tile_cols], b = 0.0f; // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = *(inptr + j*matrix_stride); } - inptr++; f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1; f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1 + F[7]*1; @@ -130,14 +122,13 @@ void OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::transfo { b = *(bptr++); } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { - *(outptrs[j]++) = std::max(std::min(f[j] + b, output_max), output_min); + *(outptr + j*output_col_stride) = std::max(std::min(f[j] + b, output_max), output_min); } } } -template class OutputTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>; -template class OutputTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>; - +} // namespace output_transform } // namespace winograd +} // namespace arm_conv diff --git 
a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp index c35037e143..ac05f23221 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4_5_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2022-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,42 +22,36 @@ * SOFTWARE. */ -#include "output.hpp" -#include "arm.hpp" +#include <algorithm> +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, +void arm_fp32_1x4_1x5( + unsigned int n_channels, const float* inptr, - const int matrix_stride, + size_t matrix_stride, const float* bptr, - float* const output, - const int, // No need to stride across rows - const int output_col_stride, - const float output_min, - const float output_max + float *outptr, + size_t, // No need to stride across rows + size_t output_col_stride, + float output_min, + float output_max ) { - // Construct a map to the output cells - float *outptrs[output_tile_cols]; - for (int j = 0; j < output_tile_cols; j++) - { - outptrs[j] = output + j*output_col_stride; - } + constexpr auto inner_tile_cols = 8u, output_tile_cols = 4u; // For each channel of the output - int channels_remaining = n_channels; -#ifdef __arm_any__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed during this transform float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f); // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 
0u; j < inner_tile_cols; j++) { F[j] = vld1q_f32(inptr + j*matrix_stride); } @@ -74,22 +68,22 @@ void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transfo b = vld1q_f32(bptr); bptr += 4; } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmaxq_f32(vminq_f32(vaddq_f32(f[j], b), vdupq_n_f32(output_max)), vdupq_n_f32(output_min)); - vst1q_f32(outptrs[j], y); - outptrs[j] += 4; + vst1q_f32(outptr + j*output_col_stride, y); } + outptr += 4; } - for (; channels_remaining >= 2; channels_remaining -= 2) + for (; n_channels >= 2; n_channels -= 2) { // Matrices used and computed during this transform float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f); // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = vld1_f32(inptr + j*matrix_stride); } @@ -106,23 +100,22 @@ void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transfo b = vld1_f32(bptr); bptr += 2; } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmax_f32(vmin_f32(vadd_f32(f[j], b), vdup_n_f32(output_max)), vdup_n_f32(output_min)); - vst1_f32(outptrs[j], y); - outptrs[j] += 2; + vst1_f32(outptr + j*output_col_stride, y); } + outptr += 2; } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + for (; n_channels; n_channels--) { // Matrices used and computed during this transform float F[inner_tile_cols], f[output_tile_cols], b = 0.0f; // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = *(inptr + j*matrix_stride); } @@ -138,15 +131,15 @@ void OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::transfo { b = *(bptr++); } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = 
std::max(std::min(f[j] + b, output_max), output_min); - *(outptrs[j]++) = y; + *(outptr + j*output_col_stride) = y; } + outptr++; } } -template class OutputTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>; -template class OutputTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>; - +} // namespace output_transform } // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp index 528cd8c691..154dc6fe1a 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_6_3_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2022-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,42 +22,37 @@ * SOFTWARE. */ -#include "output.hpp" -#include "arm.hpp" +#include <algorithm> +#include <cstddef> -namespace winograd -{ +#include <arm_neon.h> + +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, +void arm_fp32_1x6_1x3( + unsigned int n_channels, const float* inptr, - const int matrix_stride, + size_t matrix_stride, const float* bptr, - float* const output, - const int, // No need to stride across rows - const int output_col_stride, - const float output_min, - const float output_max + float *outptr, + size_t, // No need to stride across rows + size_t output_col_stride, + float output_min, + float output_max ) { - // Construct a map to the output cells - float *outptrs[output_tile_cols]; - for (int j = 0; j < output_tile_cols; j++) - { - outptrs[j] = output + j*output_col_stride; - } + constexpr unsigned int inner_tile_cols = 8, output_tile_cols = 6; // For each channel 
of the output - int channels_remaining = n_channels; -#ifdef __arm_any__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed during this transform float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f); // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = vld1q_f32(inptr + j*matrix_stride); } @@ -76,21 +71,21 @@ void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transfo b = vld1q_f32(bptr); bptr += 4; } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vminq_f32(vmaxq_f32(f[j] + b, vdupq_n_f32(output_min)), vdupq_n_f32(output_max)); - vst1q_f32(outptrs[j], y); - outptrs[j] += 4; + vst1q_f32(outptr + j*output_col_stride, y); } + outptr += 4; } - for (; channels_remaining >= 2; channels_remaining -= 2) + for (; n_channels >= 2; n_channels -= 2) { // Matrices used and computed during this transform float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f); // Read a 1x8 tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = vld1_f32(inptr + j*matrix_stride); } @@ -109,22 +104,21 @@ void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transfo b = vld1_f32(bptr); bptr += 2; } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmin_f32(vmax_f32(f[j] + b, vdup_n_f32(output_min)), vdup_n_f32(output_max)); - vst1_f32(outptrs[j], y); - outptrs[j] += 2; + vst1_f32(outptr + j*output_col_stride, y); } + outptr += 2; } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + for (; n_channels; n_channels--) { // Matrices used and computed during this transform float F[inner_tile_cols], f[output_tile_cols], b = 0.0f; // Read a 1x8 
tile in the Winograd domain - for (int j = 0; j < inner_tile_cols; j++) + for (auto j = 0u; j < inner_tile_cols; j++) { F[j] = *(inptr + j*matrix_stride); } @@ -142,14 +136,14 @@ void OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::transfo { b = *(bptr++); } - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { - *(outptrs[j]++) = std::max(std::min(f[j] + b, output_max), output_min); + *(outptr + j*output_col_stride) = std::max(std::min(f[j] + b, output_max), output_min); } + outptr++; } } -template class OutputTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>; -template class OutputTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>; - -} // namespace +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp index 8b0b4707f9..28f042bcbf 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_3x3_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2022, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,47 +22,38 @@ * SOFTWARE. 
*/ -#include "arm.hpp" -#include "output.hpp" +#include <algorithm> +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, +void arm_fp32_2x2_3x3( + unsigned int n_channels, const float* inptr, - const int matrix_stride, + size_t matrix_stride, const float* bptr, - float* const output, - const int output_row_stride, - const int output_col_stride, - const float output_min, - const float output_max + float *outptr, + size_t output_row_stride, + size_t output_col_stride, + float output_min, + float output_max ) { - // Construct a map to the output cells - float *outptrs[output_tile_rows][output_tile_cols]; - for (int i = 0; i < output_tile_rows; i++) - { - for (int j = 0; j < output_tile_cols; j++) - { - outptrs[i][j] = output + i*output_row_stride + j*output_col_stride; - } - } + constexpr auto output_tile_rows = 2u, output_tile_cols = 2u; // For each channel of the output - int channels_remaining = n_channels; -#ifdef __aarch64__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed during this transform float32x4_t F[4][4], FZ[4][2], f[2][2], b; // Read a 4x4 tile in the Winograd domain - for (int i = 0, m = 0; i < 4; i++) + for (auto i = 0u, m = 0u; i < 4; i++) { - for (int j = 0; j < 4; j++, m++) + for (auto j = 0u; j < 4; j++, m++) { F[i][j] = vld1q_f32(inptr + m*matrix_stride); } @@ -70,7 +61,7 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo inptr += 4; // Compute the matrix F Z - for (int i = 0; i < 4; i++) + for (auto i = 0u; i < 4; i++) { // FZ[i][0] = F[i][0] + F[i][1] + F[i][2]; FZ[i][0] = vaddq_f32(vaddq_f32(F[i][0], F[i][1]), F[i][2]); @@ -80,7 +71,7 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo 
} // Compute the output tile f = ZT F Z - for (int j = 0; j < 2; j++) + for (auto j = 0u; j < 2; j++) { // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j]; f[0][j] = vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), FZ[2][j]); @@ -101,29 +92,27 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo } // Write out the output tile - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmaxq_f32(vminq_f32(vaddq_f32(f[i][j], b), vdupq_n_f32(output_max)), vdupq_n_f32(output_min)); - vst1q_f32(outptrs[i][j], y); - outptrs[i][j] += 4; + vst1q_f32(outptr + i*output_row_stride + j*output_col_stride, y); } } + outptr += 4; } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 2; channels_remaining -= 2) + for (; n_channels >= 2; n_channels -= 2) { // Matrices used and computed during this transform float32x2_t F[4][4], FZ[4][2], f[2][2], b; // Read a 4x4 tile in the Winograd domain - for (int i = 0, m = 0; i < 4; i++) + for (auto i = 0u, m = 0u; i < 4; i++) { - for (int j = 0; j < 4; j++, m++) + for (auto j = 0u; j < 4; j++, m++) { F[i][j] = vld1_f32(inptr + m*matrix_stride); } @@ -131,7 +120,7 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo inptr += 2; // Compute the matrix F Z - for (int i = 0; i < 4; i++) + for (auto i = 0u; i < 4; i++) { // FZ[i][0] = F[i][0] + F[i][1] + F[i][2]; FZ[i][0] = vadd_f32(vadd_f32(F[i][0], F[i][1]), F[i][2]); @@ -141,7 +130,7 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo } // Compute the output tile f = ZT F Z - for (int j = 0; j < 2; j++) + for (auto j = 0u; j < 2; j++) { // f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j]; f[0][j] = vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), FZ[2][j]); @@ -162,28 +151,27 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo } // Write out 
the output tile - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmax_f32(vmin_f32(vadd_f32(f[i][j], b), vdup_n_f32(output_max)), vdup_n_f32(output_min)); - vst1_f32(outptrs[i][j], y); - outptrs[i][j] += 2; + vst1_f32(outptr + i*output_row_stride + j*output_col_stride, y); } } + outptr += 2; } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + for (; n_channels; n_channels--) { // Matrices used and computed during this transform float F[4][4], FZ[4][2], f[2][2], b; // Read a 4x4 tile in the Winograd domain - for (int i = 0, m = 0; i < 4; i++) + for (auto i = 0u, m = 0u; i < 4; i++) { - for (int j = 0; j < 4; j++, m++) + for (auto j = 0u; j < 4; j++, m++) { F[i][j] = *(inptr + m*matrix_stride); } @@ -191,14 +179,14 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo inptr++; // Compute the matrix F Z - for (int i = 0; i < 4; i++) + for (auto i = 0u; i < 4; i++) { FZ[i][0] = F[i][0] + F[i][1] + F[i][2]; FZ[i][1] = F[i][1] - F[i][2] - F[i][3]; } // Compute the output tile f = ZT F Z - for (int j = 0; j < 2; j++) + for (auto j = 0u; j < 2; j++) { f[0][j] = FZ[0][j] + FZ[1][j] + FZ[2][j]; f[1][j] = FZ[1][j] - FZ[2][j] - FZ[3][j]; @@ -215,17 +203,18 @@ void OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::transfo } // Write out the output tile - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = std::max(std::min(f[i][j] + b, output_max), output_min); - *(outptrs[i][j]++) = y; + *(outptr + i*output_row_stride + j*output_col_stride) = y; } } + outptr++; } } -template class OutputTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>; - -} // namespace +} // namespace output_transform +} // 
namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp index 3996be1c52..8e5ba74ac3 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_2x2_5x5_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2022, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,47 +22,38 @@ * SOFTWARE. */ -#include "output.hpp" -#include "arm.hpp" +#include <algorithm> +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transform_tile( - const int n_channels, +void arm_fp32_2x2_5x5( + unsigned int n_channels, const float* inptr, - const int matrix_stride, + size_t matrix_stride, const float* bptr, - float* const output, - const int output_row_stride, - const int output_col_stride, - const float output_min, - const float output_max + float *outptr, + size_t output_row_stride, + size_t output_col_stride, + float output_min, + float output_max ) { - // Construct a map to the output cells - float *outptrs[output_tile_rows][output_tile_cols]; - for (int i = 0; i < output_tile_rows; i++) - { - for (int j = 0; j < output_tile_cols; j++) - { - outptrs[i][j] = output + i*output_row_stride + j*output_col_stride; - } - } + constexpr auto output_tile_rows = 2u, output_tile_cols = 2u; // For each channel of the output - int channels_remaining = n_channels; -#ifdef __aarch64__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed during this transform float32x4_t F[6][6], FZ[6][2], f[2][2], b; // 
Read a 6x6 tile in the Winograd domain - for (int i = 0, m = 0; i < 6; i++) + for (auto i = 0u, m = 0u; i < 6; i++) { - for (int j = 0; j < 6; j++, m++) + for (auto j = 0u; j < 6; j++, m++) { F[i][j] = vld1q_f32(inptr + m*matrix_stride); } @@ -70,7 +61,7 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo inptr += 4; // Compute the matrix F Z - for (int i = 0; i < 6; i++) + for (auto i = 0u; i < 6; i++) { // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]); @@ -80,7 +71,7 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo } // Compute the output tile f = ZT F Z - for (int j = 0; j < 2; j++) + for (auto j = 0u; j < 2; j++) { // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]); @@ -99,29 +90,27 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo { b = vdupq_n_f32(0.0f); } - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmaxq_f32(vminq_f32(vaddq_f32(f[i][j], b), vdupq_n_f32(output_max)), vdupq_n_f32(output_min)); - vst1q_f32(outptrs[i][j], y); - outptrs[i][j] += 4; + vst1q_f32(outptr + i*output_row_stride + j*output_col_stride, y); } } + outptr += 4; } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 2; channels_remaining -= 2) + for (; n_channels >= 2; n_channels -= 2) { // Matrices used and computed during this transform float32x2_t F[6][6], FZ[6][2], f[2][2], b; // Read a 6x6 tile in the Winograd domain - for (int i = 0, m = 0; i < 6; i++) + for (auto i = 0u, m = 0u; i < 6; i++) { - for (int j = 0; j < 6; j++, m++) + for (auto j = 0u; j < 6; j++, 
m++) { F[i][j] = vld1_f32(inptr + m*matrix_stride); } @@ -129,7 +118,7 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo inptr += 2; // Compute the matrix F Z - for (int i = 0; i < 6; i++) + for (auto i = 0u; i < 6; i++) { // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]); @@ -139,7 +128,7 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo } // Compute the output tile f = ZT F Z - for (int j = 0; j < 2; j++) + for (auto j = 0u; j < 2; j++) { // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]); @@ -158,43 +147,41 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo { b = vdup_n_f32(0.0f); } - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmax_f32(vmin_f32(vadd_f32(f[i][j], b), vdup_n_f32(output_max)), vdup_n_f32(output_min)); - vst1_f32(outptrs[i][j], y); - outptrs[i][j] += 2; + vst1_f32(outptr + i*output_row_stride + j*output_col_stride, y); } } + outptr += 2; } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + if (n_channels) { // Matrices used and computed during this transform float F[6][6], FZ[6][2], f[2][2], b; // Read a 6x6 tile in the Winograd domain - for (int i = 0, m = 0; i < 6; i++) + for (auto i = 0u, m = 0u; i < 6; i++) { - for (int j = 0; j < 6; j++, m++) + for (auto j = 0u; j < 6; j++, m++) { F[i][j] = *(inptr + m*matrix_stride); } } - inptr++; // Compute the matrix F Z - for (int i = 0; i < 6; i++) + for (auto i = 0u; i < 6; i++) { FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 
2*F[i][3] + -2*F[i][4] + 1*F[i][5]; } // Compute the output tile f = ZT F Z - for (int j = 0; j < 2; j++) + for (auto j = 0u; j < 2; j++) { f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j] + 1*FZ[5][j]; @@ -209,17 +196,17 @@ void OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::transfo { b = 0.0f; } - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = std::max(std::min(f[i][j] + b, output_max), output_min); - *(outptrs[i][j]++) = y; + *(outptr + i*output_row_stride + j*output_col_stride) = y; } } } } -template class OutputTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>; - -} // namespace +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp index 1eb9b537d2..72c43019fa 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output_4x4_3x3_fp32_fp32_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2022, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,48 +22,38 @@ * SOFTWARE. 
*/ -#include "arm.hpp" -#include "output.hpp" +#include <algorithm> +#include <cstddef> +#include <arm_neon.h> -namespace winograd -{ +namespace arm_conv { +namespace winograd { +namespace output_transform { -template <> -void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>::transform_tile( - const int n_channels, +void arm_fp32_4x4_3x3( + unsigned int n_channels, const float* inptr, - const int matrix_stride, + size_t matrix_stride, const float* bptr, - float* const output, - const int output_row_stride, - const int output_col_stride, - const float output_min, - const float output_max + float *outptr, + size_t output_row_stride, + size_t output_col_stride, + float output_min, + float output_max ) { - // Construct a map to the output cells - float *outptrs[output_tile_rows][output_tile_cols]; - for (int i = 0; i < output_tile_rows; i++) - { - for (int j = 0; j < output_tile_cols; j++) - { - outptrs[i][j] = output + i*output_row_stride + j*output_col_stride; - } - } + constexpr auto output_tile_rows = 4u, output_tile_cols = 4u; // For each channel of the output - int channels_remaining = n_channels; - -#ifdef __aarch64__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed during this transform float32x4_t F[6][6], FZ[6][4], f[4][4], b; // Read a 6x6 tile in the Winograd domain - for (int i = 0, m = 0; i < 6; i++) + for (auto i = 0u, m = 0u; i < 6; i++) { - for (int j = 0; j < 6; j++, m++) + for (auto j = 0u; j < 6; j++, m++) { F[i][j] = vld1q_f32(inptr + m*matrix_stride); } @@ -71,7 +61,7 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots inptr += 4; // Compute the matrix F Z - for (int i = 0; i < 6; i++) + for (auto i = 0u; i < 6; i++) { // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; FZ[i][0] = vaddq_f32(vaddq_f32(vaddq_f32(F[i][0], F[i][1]), vaddq_f32(F[i][2], F[i][3])), F[i][4]); @@ 
-87,7 +77,7 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots } // Compute the output tile f = ZT F Z - for (int j = 0; j < 4; j++) + for (auto j = 0u; j < 4; j++) { // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; f[0][j] = vaddq_f32(vaddq_f32(vaddq_f32(FZ[0][j], FZ[1][j]), vaddq_f32(FZ[2][j], FZ[3][j])), FZ[4][j]); @@ -112,29 +102,27 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots { b = vdupq_n_f32(0.0f); } - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmaxq_f32(vminq_f32(vaddq_f32(f[i][j], b), vdupq_n_f32(output_max)), vdupq_n_f32(output_min)); - vst1q_f32(outptrs[i][j], y); - outptrs[i][j] += 4; + vst1q_f32(outptr + i*output_row_stride + j*output_col_stride, y); } } + outptr += 4; } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 2; channels_remaining -= 2) + for (; n_channels >= 2; n_channels -= 2) { // Matrices used and computed during this transform float32x2_t F[6][6], FZ[6][4], f[4][4], b; // Read a 6x6 tile in the Winograd domain - for (int i = 0, m = 0; i < 6; i++) + for (auto i = 0u, m = 0u; i < 6; i++) { - for (int j = 0; j < 6; j++, m++) + for (auto j = 0u; j < 6; j++, m++) { F[i][j] = vld1_f32(inptr + m*matrix_stride); } @@ -142,7 +130,7 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots inptr += 2; // Compute the matrix F Z - for (int i = 0; i < 6; i++) + for (auto i = 0u; i < 6; i++) { // FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; FZ[i][0] = vadd_f32(vadd_f32(vadd_f32(F[i][0], F[i][1]), vadd_f32(F[i][2], F[i][3])), F[i][4]); @@ -158,7 +146,7 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots } // Compute the output tile f = ZT F Z - for (int j = 0; j < 4; j++) + for (auto j = 0u; j 
< 4; j++) { // f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; f[0][j] = vadd_f32(vadd_f32(vadd_f32(FZ[0][j], FZ[1][j]), vadd_f32(FZ[2][j], FZ[3][j])), FZ[4][j]); @@ -183,28 +171,27 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots { b = vdup_n_f32(0.0f); } - for (int i = 0; i < output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = vmax_f32(vmin_f32(vadd_f32(f[i][j], b), vdup_n_f32(output_max)), vdup_n_f32(output_min)); - vst1_f32(outptrs[i][j], y); - outptrs[i][j] += 2; + vst1_f32(outptr + i*output_row_stride + j*output_col_stride, y); } } + outptr += 2; } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + for (; n_channels; n_channels--) { // Matrices used and computed during this transform float F[6][6], FZ[6][4], f[4][4], b; // Read a 6x6 tile in the Winograd domain - for (int i = 0, m = 0; i < 6; i++) + for (auto i = 0u, m = 0u; i < 6; i++) { - for (int j = 0; j < 6; j++, m++) + for (auto j = 0u; j < 6; j++, m++) { F[i][j] = *(inptr + m*matrix_stride); } @@ -212,7 +199,7 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots inptr++; // Compute the matrix F Z - for (int i = 0; i < 6; i++) + for (auto i = 0u; i < 6; i++) { FZ[i][0] = 1*F[i][0] + 1*F[i][1] + 1*F[i][2] + 1*F[i][3] + 1*F[i][4]; FZ[i][1] = 1*F[i][1] + -1*F[i][2] + 2*F[i][3] + -2*F[i][4]; @@ -221,7 +208,7 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots } // Compute the output tile f = ZT F Z - for (int j = 0; j < 4; j++) + for (auto j = 0u; j < 4; j++) { f[0][j] = 1*FZ[0][j] + 1*FZ[1][j] + 1*FZ[2][j] + 1*FZ[3][j] + 1*FZ[4][j]; f[1][j] = 1*FZ[1][j] + -1*FZ[2][j] + 2*FZ[3][j] + -2*FZ[4][j]; @@ -238,17 +225,18 @@ void winograd::OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots { b = 0.0f; } - for (int i = 0; i < 
output_tile_rows; i++) + for (auto i = 0u; i < output_tile_rows; i++) { - for (int j = 0; j < output_tile_cols; j++) + for (auto j = 0u; j < output_tile_cols; j++) { const auto y = std::max(std::min(f[i][j] + b, output_max), output_min); - *(outptrs[i][j]++) = y; + *(outptr + i*output_row_stride + j*output_col_stride) = y; } } + outptr++; } } -template class OutputTransform<3, 3, 6, 6, float, float, winograd::WinogradRoots::Integers>; - +} // namespace output_transform } // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp new file mode 100644 index 0000000000..043914d590 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp @@ -0,0 +1,891 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if defined(ARM_COMPUTE_ENABLE_SME) + +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace output_transform { + +void sme_fp32_mopa_4x4_3x3( + unsigned int n_channels, + const float* inptr, + size_t matrix_stride, + const float* bptr, + float* const output, + size_t output_row_stride, + size_t output_col_stride, + float output_min, + float output_max +) +{ + // The below assembler uses the Kronecker product and the "vec trick" to + // implement the Winograd output transform (y = AT Y A) using the SME + // array. This code REQUIRES that the vectors are 512b long (or longer, if + // we add some predication). + // + // The "vec trick" uses the identity $vec(AT Y A) = (AT (x) AT) vec(Y)$ to + // convert the chain of matrix multiplications into a matrix-vector + // product. We then stack multiple channels of vec(Y) together to allow us + // to perform multiple channels of the transformation simultaneously. + // + // Since the complete matrix (AT (x) AT) is quite big [16 x 36], we compute + // it on the fly. To do so, we store two representations of the matrix AT. + // The first representation (the outer terms) contains, within each quad, + // four coefficients of the matrix AT. + const float outer_terms[32] = { + 1, 1, 1, 1, + 0, 1, -1, 2, + 0, 1, 1, 4, + 0, 1, -1, 8, + // The following rows are continuations of the first four rows, and each + // contains two columns of padding values which aren't used in the + // computation but are there to ensure that the coefficients end up in + // the right quads of the vector into which they're read. + 1, 0, 0, 0, + -2, 0, 0, 0, + 4, 0, 0, 0, + -8, 1, 0, 0 + }; + // This should be loaded completely into two Z registers. 
+ // + // We can then use by-element FMLA to construct columns of (AT (x) AT) by + // multiplying elements of the outer terms against the following inner + // terms (again split into quads, but expected to be loaded replicated such + // that each of the six required Z registers contains a repeated quad of + // the values). + const float inner_terms[24] = { + 1, 0, 0, 0, + 1, 1, 1, 1, + 1, -1, 1, -1, + 1, 2, 4, 8, + 1, -2, 4, -8, + 0, 0, 0, 1 + }; + + struct Params + { + const float *outer_terms; + const float *inner_terms; + float act_min; + float act_max; + + Params(const float *outer_terms, + const float *inner_terms, + float act_min, + float act_max) + : outer_terms(outer_terms), inner_terms(inner_terms), + act_min(act_min), act_max(act_max) + { + } + }; + + Params params(outer_terms, inner_terms, output_min, output_max); + + __asm__ __volatile__( + "ldr x20, [%x[params], %[offsetof_Params_outer_terms]]\n" + ".inst 0xd503477f // SMSTART ZA\n" + "ptrue p5.b\n" + "ld1rw { z12.s }, p5/Z, [%x[params], %[offsetof_Params_act_min]]\n" + "ld1rw { z10.s }, p5/Z, [%x[params], %[offsetof_Params_act_max]]\n" + "pfalse p8.b\n" + "ldr x8, [%x[params], %[offsetof_Params_inner_terms]]\n" + "ld1w { z6.s }, p5/Z, [x20]\n" + "ld1w { z7.s }, p5/Z, [x20, #1, MUL VL]\n" + "ld1rqw { z9.s }, p5/Z, [x8]\n" + "ld1rqw { z8.s }, p5/Z, [x8, #16]\n" + "ld1rqw { z15.s }, p5/Z, [x8, #32]\n" + "fmul z11.s, z9.s, z6.s[0]\n" + "fmul z5.s, z9.s, z6.s[1]\n" + "ld1rqw { z4.s }, p5/Z, [x8, #48]\n" + "ld1rqw { z3.s }, p5/Z, [x8, #64]\n" + "ld1rqw { z2.s }, p5/Z, [x8, #80]\n" + "cbz %x[bptr], 1f\n" + "ptrue p8.s\n" + "1:" // Set bias predicate: Done + ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n" + "fmov z1.s, #1.0\n" + "mov x25, #0x0\n" + "cntw x24\n" + "cntw x23, ALL, MUL #2\n" + "cntw x22, ALL, MUL #3\n" + "whilelt p4.s, x25, %x[n_channels]\n" + "whilelt p3.s, x24, %x[n_channels]\n" + "ld1w { z31.s }, p4/Z, [%x[inptr], x25, LSL #2]\n" + "ld1w { z30.s }, p3/Z, 
[%x[inptr], x24, LSL #2]\n" + "whilelt p2.s, x23, %x[n_channels]\n" + "whilelt p1.s, x22, %x[n_channels]\n" + "ld1w { z29.s }, p2/Z, [%x[inptr], x23, LSL #2]\n" + "add x21, %x[inptr], %x[matrix_stride], LSL #2\n" + "and p0.b, p5/Z, p8.b, p4.b\n" + "ld1w { z28.s }, p1/Z, [%x[inptr], x22, LSL #2]\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x25, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p3.b\n" + ".inst 0x8080b420 // fmopa za0.s, p5/M, p5/M, z1.s, z0.s\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x24, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p2.b\n" + ".inst 0x8080b421 // fmopa za1.s, p5/M, p5/M, z1.s, z0.s\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x23, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p1.b\n" + ".inst 0x8080b422 // fmopa za2.s, p5/M, p5/M, z1.s, z0.s\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x22, LSL #2]\n" + ".inst 0x8080b423 // fmopa za3.s, p5/M, p5/M, z1.s, z0.s\n" + "2:" // Loop + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + "mov x14, #0xc\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + "whilelt p0.s, x25, %x[n_channels]\n" + "add x20, %x[output], %x[output_col_stride], LSL #2\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + "add x8, %x[output], 
%x[output_row_stride], LSL #2\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z9.s, z6.s[2]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z9.s, z6.s[3]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z9.s, z7.s[0]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z9.s, z7.s[1]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" 
+ ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z8.s, z6.s[0]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z8.s, z6.s[1]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z8.s, z6.s[2]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z8.s, z6.s[3]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa 
za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z8.s, z7.s[0]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z8.s, z7.s[1]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z15.s, z6.s[0]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z15.s, z6.s[1]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL 
#2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z15.s, z6.s[2]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z15.s, z6.s[3]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z15.s, z7.s[0]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z15.s, z7.s[1]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 
// fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z4.s, z6.s[0]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z4.s, z6.s[1]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z4.s, z6.s[2]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z4.s, 
z6.s[3]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z4.s, z7.s[0]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z4.s, z7.s[1]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z3.s, z6.s[0]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + 
".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z3.s, z6.s[1]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z3.s, z6.s[2]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z3.s, z6.s[3]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z3.s, z7.s[0]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa 
za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z3.s, z7.s[1]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + "ld1w { z31.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + "ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z2.s, z6.s[0]\n" + "ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + "ld1w { z28.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z2.s, z6.s[1]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z2.s, z6.s[2]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, 
z18.s\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z2.s, z6.s[3]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0x809fb560 // fmopa za0.s, p5/M, p5/M, z11.s, z31.s\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + ".inst 0x809eb561 // fmopa za1.s, p5/M, p5/M, z11.s, z30.s\n" + ".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n" + ".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n" + "fmul z11.s, z2.s, z7.s[0]\n" + ".inst 0x809bb4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z27.s\n" + ".inst 0x809ab4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z26.s\n" + ".inst 0x8099b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z25.s\n" + ".inst 0x8098b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z24.s\n" + "fmul z5.s, z2.s, z7.s[1]\n" + ".inst 0x8097b560 // fmopa za0.s, p5/M, p5/M, z11.s, z23.s\n" + ".inst 0x8096b561 // fmopa za1.s, p5/M, p5/M, z11.s, z22.s\n" + ".inst 0x8095b562 // fmopa za2.s, p5/M, p5/M, z11.s, z21.s\n" + ".inst 0x8094b563 // fmopa za3.s, p5/M, p5/M, z11.s, z20.s\n" + "fmul z11.s, z9.s, z6.s[0]\n" + ".inst 0x8093b4a0 // fmopa za0.s, p5/M, p5/M, z5.s, z19.s\n" + ".inst 0x8092b4a1 // fmopa za1.s, p5/M, p5/M, z5.s, z18.s\n" + ".inst 0x8091b4a2 // fmopa za2.s, p5/M, p5/M, z5.s, z17.s\n" + ".inst 0x8090b4a3 // fmopa za3.s, p5/M, p5/M, z5.s, z16.s\n" + "fmul z5.s, z9.s, z6.s[1]\n" + ".inst 0xc082741f // mova z31.s, p5/M, za0h.s[XZR]\n" + ".inst 0xc082541c // mova z28.s, p5/M, za0h.s[x14]\n" + "fmin z31.s, p5/M, z31.s, z10.s\n" + ".inst 0xc082743b // mova z27.s, p5/M, za0h.s[XZR, #1]\n" + "fmin z28.s, p5/M, z28.s, z10.s\n" + ".inst 0xc0825438 // mova z24.s, p5/M, za0h.s[x14, #1]\n" + "fmin z27.s, p5/M, z27.s, z10.s\n" + "mov x13, #0x4\n" + "mov x12, #0x8\n" + ".inst 0xc082341e // mova z30.s, p5/M, za0h.s[x13]\n" + "fmin z24.s, p5/M, z24.s, z10.s\n" + ".inst 0xc082141d // mova 
z29.s, p5/M, za0h.s[x12]\n" + "fmax z31.s, p5/M, z31.s, z12.s\n" + "fmin z30.s, p5/M, z30.s, z10.s\n" + ".inst 0xc082343a // mova z26.s, p5/M, za0h.s[x13, #1]\n" + "fmin z29.s, p5/M, z29.s, z10.s\n" + "fmax z28.s, p5/M, z28.s, z12.s\n" + ".inst 0xc0821439 // mova z25.s, p5/M, za0h.s[x12, #1]\n" + "fmax z27.s, p5/M, z27.s, z12.s\n" + "fmin z26.s, p5/M, z26.s, z10.s\n" + ".inst 0xc0827457 // mova z23.s, p5/M, za0h.s[XZR, #2]\n" + "fmin z25.s, p5/M, z25.s, z10.s\n" + "fmax z24.s, p5/M, z24.s, z12.s\n" + ".inst 0xc0823456 // mova z22.s, p5/M, za0h.s[x13, #2]\n" + "fmax z30.s, p5/M, z30.s, z12.s\n" + "fmin z23.s, p5/M, z23.s, z10.s\n" + ".inst 0xc0821455 // mova z21.s, p5/M, za0h.s[x12, #2]\n" + "fmax z29.s, p5/M, z29.s, z12.s\n" + "fmin z22.s, p5/M, z22.s, z10.s\n" + ".inst 0xc0825454 // mova z20.s, p5/M, za0h.s[x14, #2]\n" + "fmax z26.s, p5/M, z26.s, z12.s\n" + "fmin z21.s, p5/M, z21.s, z10.s\n" + ".inst 0xc0827473 // mova z19.s, p5/M, za0h.s[XZR, #3]\n" + "fmax z25.s, p5/M, z25.s, z12.s\n" + "fmin z20.s, p5/M, z20.s, z10.s\n" + ".inst 0xc0823472 // mova z18.s, p5/M, za0h.s[x13, #3]\n" + "fmax z23.s, p5/M, z23.s, z12.s\n" + "fmin z19.s, p5/M, z19.s, z10.s\n" + ".inst 0xc0821471 // mova z17.s, p5/M, za0h.s[x12, #3]\n" + "fmax z22.s, p5/M, z22.s, z12.s\n" + "fmin z18.s, p5/M, z18.s, z10.s\n" + ".inst 0xc0825470 // mova z16.s, p5/M, za0h.s[x14, #3]\n" + "fmax z21.s, p5/M, z21.s, z12.s\n" + "fmin z17.s, p5/M, z17.s, z10.s\n" + "fmax z20.s, p5/M, z20.s, z12.s\n" + "fmin z16.s, p5/M, z16.s, z10.s\n" + "st1w { z31.s }, p0, [%x[output], x25, LSL #2]\n" + "fmax z19.s, p5/M, z19.s, z12.s\n" + "st1w { z30.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z18.s, p5/M, z18.s, z12.s\n" + "st1w { z29.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z17.s, p5/M, z17.s, z12.s\n" + "st1w { z28.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "fmax z16.s, p5/M, z16.s, z12.s\n" 
+ "st1w { z27.s }, p0, [x8, x25, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z26.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z25.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z24.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z23.s }, p0, [x8, x25, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z22.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z21.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z20.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z19.s }, p0, [x8, x25, LSL #2]\n" + "st1w { z18.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z17.s }, p0, [x20, x25, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z16.s }, p0, [x20, x25, LSL #2]\n" + "whilelt p0.s, x24, %x[n_channels]\n" + "b.none 3f\n" + ".inst 0xc082749f // mova z31.s, p5/M, za1h.s[XZR]\n" + ".inst 0xc082349e // mova z30.s, p5/M, za1h.s[x13]\n" + "fmin z31.s, p5/M, z31.s, z10.s\n" + ".inst 0xc082149d // mova z29.s, p5/M, za1h.s[x12]\n" + "fmin z30.s, p5/M, z30.s, z10.s\n" + ".inst 0xc082549c // mova z28.s, p5/M, za1h.s[x14]\n" + "fmin z29.s, p5/M, z29.s, z10.s\n" + ".inst 0xc08274bb // mova z27.s, p5/M, za1h.s[XZR, #1]\n" + "fmin z28.s, p5/M, z28.s, z10.s\n" + ".inst 0xc08234ba // mova z26.s, p5/M, za1h.s[x13, #1]\n" + "fmax z31.s, p5/M, z31.s, z12.s\n" + "fmin z27.s, p5/M, z27.s, z10.s\n" + ".inst 0xc08214b9 // mova z25.s, p5/M, za1h.s[x12, #1]\n" + "fmax z30.s, p5/M, z30.s, z12.s\n" + "fmin z26.s, p5/M, z26.s, z10.s\n" + ".inst 0xc08254b8 // mova z24.s, p5/M, za1h.s[x14, #1]\n" + "fmax z29.s, p5/M, z29.s, z12.s\n" + "fmin z25.s, p5/M, z25.s, z10.s\n" + ".inst 0xc08274d7 // mova z23.s, p5/M, za1h.s[XZR, #2]\n" + "fmax z28.s, 
p5/M, z28.s, z12.s\n" + "fmin z24.s, p5/M, z24.s, z10.s\n" + ".inst 0xc08234d6 // mova z22.s, p5/M, za1h.s[x13, #2]\n" + "fmax z27.s, p5/M, z27.s, z12.s\n" + "fmin z23.s, p5/M, z23.s, z10.s\n" + ".inst 0xc08214d5 // mova z21.s, p5/M, za1h.s[x12, #2]\n" + "fmax z26.s, p5/M, z26.s, z12.s\n" + "fmin z22.s, p5/M, z22.s, z10.s\n" + "add x20, %x[output], %x[output_col_stride], LSL #2\n" + ".inst 0xc08254d4 // mova z20.s, p5/M, za1h.s[x14, #2]\n" + "fmax z25.s, p5/M, z25.s, z12.s\n" + "fmin z21.s, p5/M, z21.s, z10.s\n" + "add x8, %x[output], %x[output_row_stride], LSL #2\n" + ".inst 0xc08274f3 // mova z19.s, p5/M, za1h.s[XZR, #3]\n" + "fmax z24.s, p5/M, z24.s, z12.s\n" + "fmin z20.s, p5/M, z20.s, z10.s\n" + ".inst 0xc08234f2 // mova z18.s, p5/M, za1h.s[x13, #3]\n" + "fmax z23.s, p5/M, z23.s, z12.s\n" + "fmin z19.s, p5/M, z19.s, z10.s\n" + ".inst 0xc08214f1 // mova z17.s, p5/M, za1h.s[x12, #3]\n" + "fmax z22.s, p5/M, z22.s, z12.s\n" + "fmin z18.s, p5/M, z18.s, z10.s\n" + ".inst 0xc08254f0 // mova z16.s, p5/M, za1h.s[x14, #3]\n" + "fmax z21.s, p5/M, z21.s, z12.s\n" + "fmin z17.s, p5/M, z17.s, z10.s\n" + "fmax z20.s, p5/M, z20.s, z12.s\n" + "fmin z16.s, p5/M, z16.s, z10.s\n" + "st1w { z31.s }, p0, [%x[output], x24, LSL #2]\n" + "fmax z19.s, p5/M, z19.s, z12.s\n" + "st1w { z30.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z18.s, p5/M, z18.s, z12.s\n" + "st1w { z29.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z17.s, p5/M, z17.s, z12.s\n" + "st1w { z28.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "fmax z16.s, p5/M, z16.s, z12.s\n" + "st1w { z27.s }, p0, [x8, x24, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z26.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z25.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z24.s }, p0, [x20, x24, LSL #2]\n" + 
"add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z23.s }, p0, [x8, x24, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z22.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z21.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z20.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z19.s }, p0, [x8, x24, LSL #2]\n" + "st1w { z18.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z17.s }, p0, [x20, x24, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z16.s }, p0, [x20, x24, LSL #2]\n" + "whilelt p0.s, x23, %x[n_channels]\n" + "b.none 3f\n" + ".inst 0xc082751f // mova z31.s, p5/M, za2h.s[XZR]\n" + ".inst 0xc082351e // mova z30.s, p5/M, za2h.s[x13]\n" + "fmin z31.s, p5/M, z31.s, z10.s\n" + ".inst 0xc082151d // mova z29.s, p5/M, za2h.s[x12]\n" + "fmin z30.s, p5/M, z30.s, z10.s\n" + ".inst 0xc082551c // mova z28.s, p5/M, za2h.s[x14]\n" + "fmin z29.s, p5/M, z29.s, z10.s\n" + ".inst 0xc082753b // mova z27.s, p5/M, za2h.s[XZR, #1]\n" + "fmin z28.s, p5/M, z28.s, z10.s\n" + ".inst 0xc082353a // mova z26.s, p5/M, za2h.s[x13, #1]\n" + "fmax z31.s, p5/M, z31.s, z12.s\n" + "fmin z27.s, p5/M, z27.s, z10.s\n" + ".inst 0xc0821539 // mova z25.s, p5/M, za2h.s[x12, #1]\n" + "fmax z30.s, p5/M, z30.s, z12.s\n" + "fmin z26.s, p5/M, z26.s, z10.s\n" + ".inst 0xc0825538 // mova z24.s, p5/M, za2h.s[x14, #1]\n" + "fmax z29.s, p5/M, z29.s, z12.s\n" + "fmin z25.s, p5/M, z25.s, z10.s\n" + ".inst 0xc0827557 // mova z23.s, p5/M, za2h.s[XZR, #2]\n" + "fmax z28.s, p5/M, z28.s, z12.s\n" + "fmin z24.s, p5/M, z24.s, z10.s\n" + ".inst 0xc0823556 // mova z22.s, p5/M, za2h.s[x13, #2]\n" + "fmax z27.s, p5/M, z27.s, z12.s\n" + "fmin z23.s, p5/M, z23.s, z10.s\n" + ".inst 0xc0821555 // mova z21.s, p5/M, za2h.s[x12, #2]\n" + "fmax z26.s, p5/M, z26.s, z12.s\n" + "fmin z22.s, p5/M, z22.s, z10.s\n" + 
"add x20, %x[output], %x[output_col_stride], LSL #2\n" + ".inst 0xc0825554 // mova z20.s, p5/M, za2h.s[x14, #2]\n" + "fmax z25.s, p5/M, z25.s, z12.s\n" + "fmin z21.s, p5/M, z21.s, z10.s\n" + "add x8, %x[output], %x[output_row_stride], LSL #2\n" + ".inst 0xc0827573 // mova z19.s, p5/M, za2h.s[XZR, #3]\n" + "fmax z24.s, p5/M, z24.s, z12.s\n" + "fmin z20.s, p5/M, z20.s, z10.s\n" + ".inst 0xc0823572 // mova z18.s, p5/M, za2h.s[x13, #3]\n" + "fmax z23.s, p5/M, z23.s, z12.s\n" + "fmin z19.s, p5/M, z19.s, z10.s\n" + ".inst 0xc0821571 // mova z17.s, p5/M, za2h.s[x12, #3]\n" + "fmax z22.s, p5/M, z22.s, z12.s\n" + "fmin z18.s, p5/M, z18.s, z10.s\n" + ".inst 0xc0825570 // mova z16.s, p5/M, za2h.s[x14, #3]\n" + "fmax z21.s, p5/M, z21.s, z12.s\n" + "fmin z17.s, p5/M, z17.s, z10.s\n" + "fmax z20.s, p5/M, z20.s, z12.s\n" + "fmin z16.s, p5/M, z16.s, z10.s\n" + "st1w { z31.s }, p0, [%x[output], x23, LSL #2]\n" + "fmax z19.s, p5/M, z19.s, z12.s\n" + "st1w { z30.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z18.s, p5/M, z18.s, z12.s\n" + "st1w { z29.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z17.s, p5/M, z17.s, z12.s\n" + "st1w { z28.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "fmax z16.s, p5/M, z16.s, z12.s\n" + "st1w { z27.s }, p0, [x8, x23, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z26.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z25.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z24.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z23.s }, p0, [x8, x23, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z22.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z21.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + 
"st1w { z20.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z19.s }, p0, [x8, x23, LSL #2]\n" + "st1w { z18.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z17.s }, p0, [x20, x23, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z16.s }, p0, [x20, x23, LSL #2]\n" + "whilelt p0.s, x22, %x[n_channels]\n" + "b.none 3f\n" + "fmov z1.s, #1.0\n" + ".inst 0xc082759f // mova z31.s, p5/M, za3h.s[XZR]\n" + ".inst 0xc082359e // mova z30.s, p5/M, za3h.s[x13]\n" + "fmin z31.s, p5/M, z31.s, z10.s\n" + ".inst 0xc082159d // mova z29.s, p5/M, za3h.s[x12]\n" + "fmin z30.s, p5/M, z30.s, z10.s\n" + ".inst 0xc082559c // mova z28.s, p5/M, za3h.s[x14]\n" + "fmin z29.s, p5/M, z29.s, z10.s\n" + ".inst 0xc08275bb // mova z27.s, p5/M, za3h.s[XZR, #1]\n" + "fmin z28.s, p5/M, z28.s, z10.s\n" + ".inst 0xc08235ba // mova z26.s, p5/M, za3h.s[x13, #1]\n" + "fmax z31.s, p5/M, z31.s, z12.s\n" + "fmin z27.s, p5/M, z27.s, z10.s\n" + ".inst 0xc08215b9 // mova z25.s, p5/M, za3h.s[x12, #1]\n" + "fmax z30.s, p5/M, z30.s, z12.s\n" + "fmin z26.s, p5/M, z26.s, z10.s\n" + ".inst 0xc08255b8 // mova z24.s, p5/M, za3h.s[x14, #1]\n" + "fmax z29.s, p5/M, z29.s, z12.s\n" + "fmin z25.s, p5/M, z25.s, z10.s\n" + ".inst 0xc08275d7 // mova z23.s, p5/M, za3h.s[XZR, #2]\n" + "fmax z28.s, p5/M, z28.s, z12.s\n" + "fmin z24.s, p5/M, z24.s, z10.s\n" + ".inst 0xc08235d6 // mova z22.s, p5/M, za3h.s[x13, #2]\n" + "fmax z27.s, p5/M, z27.s, z12.s\n" + "fmin z23.s, p5/M, z23.s, z10.s\n" + ".inst 0xc08215d5 // mova z21.s, p5/M, za3h.s[x12, #2]\n" + "fmax z26.s, p5/M, z26.s, z12.s\n" + "fmin z22.s, p5/M, z22.s, z10.s\n" + ".inst 0xc08255d4 // mova z20.s, p5/M, za3h.s[x14, #2]\n" + "fmax z25.s, p5/M, z25.s, z12.s\n" + "fmin z21.s, p5/M, z21.s, z10.s\n" + "add x20, %x[output], %x[output_col_stride], LSL #2\n" + ".inst 0xc08275f3 // mova z19.s, p5/M, za3h.s[XZR, #3]\n" + "fmax z24.s, p5/M, z24.s, z12.s\n" + "fmin z20.s, p5/M, 
z20.s, z10.s\n" + "add x8, %x[output], %x[output_row_stride], LSL #2\n" + ".inst 0xc08235f2 // mova z18.s, p5/M, za3h.s[x13, #3]\n" + "fmax z23.s, p5/M, z23.s, z12.s\n" + "fmin z19.s, p5/M, z19.s, z10.s\n" + "incw x25, ALL, MUL #4\n" + ".inst 0xc08215f1 // mova z17.s, p5/M, za3h.s[x12, #3]\n" + "fmax z22.s, p5/M, z22.s, z12.s\n" + "fmin z18.s, p5/M, z18.s, z10.s\n" + "incw x24, ALL, MUL #4\n" + ".inst 0xc08255f0 // mova z16.s, p5/M, za3h.s[x14, #3]\n" + "fmax z21.s, p5/M, z21.s, z12.s\n" + "fmin z17.s, p5/M, z17.s, z10.s\n" + "incw x23, ALL, MUL #4\n" + ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n" + "fmax z20.s, p5/M, z20.s, z12.s\n" + "fmin z16.s, p5/M, z16.s, z10.s\n" + "add x21, %x[inptr], %x[matrix_stride], LSL #2\n" + "fmax z19.s, p5/M, z19.s, z12.s\n" + "st1w { z31.s }, p0, [%x[output], x22, LSL #2]\n" + "fmax z18.s, p5/M, z18.s, z12.s\n" + "st1w { z30.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z17.s, p5/M, z17.s, z12.s\n" + "st1w { z29.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "fmax z16.s, p5/M, z16.s, z12.s\n" + "st1w { z28.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z27.s }, p0, [x8, x22, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z26.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z25.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z24.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL #2\n" + "st1w { z23.s }, p0, [x8, x22, LSL #2]\n" + "add x8, x8, %x[output_row_stride], LSL #2\n" + "st1w { z22.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z21.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z20.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x8, %x[output_col_stride], LSL 
#2\n" + "st1w { z19.s }, p0, [x8, x22, LSL #2]\n" + "st1w { z18.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z17.s }, p0, [x20, x22, LSL #2]\n" + "add x20, x20, %x[output_col_stride], LSL #2\n" + "st1w { z16.s }, p0, [x20, x22, LSL #2]\n" + "incw x22, ALL, MUL #4\n" + "whilelt p1.s, x22, %x[n_channels]\n" + "ld1w { z28.s }, p1/Z, [%x[inptr], x22, LSL #2]\n" + "ld1w { z24.s }, p1/Z, [x21, x22, LSL #2]\n" + "whilelt p2.s, x23, %x[n_channels]\n" + "whilelt p3.s, x24, %x[n_channels]\n" + "ld1w { z30.s }, p3/Z, [%x[inptr], x24, LSL #2]\n" + "whilelt p4.s, x25, %x[n_channels]\n" + "ld1w { z31.s }, p4/Z, [%x[inptr], x25, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p4.b\n" + "ld1w { z29.s }, p2/Z, [%x[inptr], x23, LSL #2]\n" + "ld1w { z27.s }, p4/Z, [x21, x25, LSL #2]\n" + "ld1w { z26.s }, p3/Z, [x21, x24, LSL #2]\n" + "ld1w { z25.s }, p2/Z, [x21, x23, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + "ld1w { z23.s }, p4/Z, [x21, x25, LSL #2]\n" + "ld1w { z22.s }, p3/Z, [x21, x24, LSL #2]\n" + "ld1w { z21.s }, p2/Z, [x21, x23, LSL #2]\n" + "ld1w { z20.s }, p1/Z, [x21, x22, LSL #2]\n" + "add x21, x21, %x[matrix_stride], LSL #2\n" + "ld1w { z19.s }, p4/Z, [x21, x25, LSL #2]\n" + "ld1w { z18.s }, p3/Z, [x21, x24, LSL #2]\n" + "ld1w { z17.s }, p2/Z, [x21, x23, LSL #2]\n" + "ld1w { z16.s }, p1/Z, [x21, x22, LSL #2]\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x25, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p3.b\n" + ".inst 0x8080b420 // fmopa za0.s, p5/M, p5/M, z1.s, z0.s\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x24, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p2.b\n" + ".inst 0x8080b421 // fmopa za1.s, p5/M, p5/M, z1.s, z0.s\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x23, LSL #2]\n" + "and p0.b, p5/Z, p8.b, p1.b\n" + ".inst 0x8080b422 // fmopa za2.s, p5/M, p5/M, z1.s, z0.s\n" + "ld1w { z0.s }, p0/Z, [%x[bptr], x22, LSL #2]\n" + ".inst 0x8080b423 // fmopa za3.s, p5/M, p5/M, z1.s, z0.s\n" + "b.any 2b\n" + "3:" // End + ".inst 0xd503467f // SMSTOP\n" + : + : 
[bptr] "r" (bptr), [inptr] "r" (inptr), [matrix_stride] "r" (matrix_stride), [n_channels] "r" (n_channels), [offsetof_Params_act_max] "I" (offsetof(Params, act_max)), [offsetof_Params_act_min] "I" (offsetof(Params, act_min)), [offsetof_Params_inner_terms] "I" (offsetof(Params, inner_terms)), [offsetof_Params_outer_terms] "I" (offsetof(Params, outer_terms)), [output] "r" (output), [output_col_stride] "r" (output_col_stride), [output_row_stride] "r" (output_row_stride), [params] "r" (¶ms) + : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p8", "x12", "x13", "x14", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); +} + +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv + +#endif // defined(ARM_COMPUTE_ENABLE_SME) diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms_fp16.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp16.cpp new file mode 100644 index 0000000000..c39b1dc083 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp16.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +#include "output_transform.hpp" +#include "winograd_implementations.hpp" + +namespace arm_conv { +namespace winograd { +namespace output_transform { + +void a64_fp16_4x4_3x3(unsigned int, const __fp16 *, size_t, const __fp16 *, __fp16 *, size_t, size_t, __fp16, __fp16); + +#define IMPL(OUT_HEIGHT, OUT_WIDTH, KERN_HEIGHT, KERN_WIDTH, FUNC, DRIVER) \ + new Transform ## DRIVER <__fp16, __fp16>(#FUNC, OUT_HEIGHT, OUT_WIDTH, KERN_HEIGHT, KERN_WIDTH, FUNC) + + +static const TransformImplementation<__fp16> transforms_fp16[] = { + { IMPL(4, 4, 3, 3, a64_fp16_4x4_3x3, Unpadded) }, + { nullptr } +}; + +template <> +const TransformImplementation<__fp16> *implementation_list(void) +{ + return transforms_fp16; +} + +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv + +#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
\ No newline at end of file diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp new file mode 100644 index 0000000000..0a7030324e --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "output_transform.hpp" +#include "winograd_implementations.hpp" + +namespace arm_conv { +namespace winograd { +namespace output_transform { + +#if defined(__aarch64__) +#if defined(ARM_COMPUTE_ENABLE_SME) +void sme_fp32_mopa_4x4_3x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); +#endif // defined(ARM_COMPUTE_ENABLE_SME) +#endif // defined(__aarch64__) +void arm_fp32_4x4_3x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); +void arm_fp32_2x2_3x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); +void arm_fp32_2x2_5x5(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); +void arm_fp32_1x6_1x3(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); +void arm_fp32_1x4_1x5(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); +void arm_fp32_1x2_1x7(unsigned int, const float *, size_t, const float *, float *, size_t, size_t, float, float); + +#define IMPL(OUT_HEIGHT, OUT_WIDTH, KERN_HEIGHT, KERN_WIDTH, FUNC, DRIVER) \ + new Transform ## DRIVER <float, float>(#FUNC, OUT_HEIGHT, OUT_WIDTH, KERN_HEIGHT, KERN_WIDTH, FUNC) + +#define IMPL_T(OUT_HEIGHT, OUT_WIDTH, KERN_HEIGHT, KERN_WIDTH, FUNC, DRIVER) \ + new Transform ## DRIVER <float, float>(#FUNC, OUT_HEIGHT, OUT_WIDTH, KERN_HEIGHT, KERN_WIDTH, Transform ## DRIVER <float, float>::get_transposed_kernel(FUNC)) + +static const TransformImplementation<float> transforms_fp32[] = { +#if defined(__aarch64__) +#if defined(ARM_COMPUTE_ENABLE_SME) + { IMPL(4, 4, 3, 3, sme_fp32_mopa_4x4_3x3, Unpadded), MethodConstraints::RequiresSME }, +#endif // defined(ARM_COMPUTE_ENABLE_SME) +#endif // defined(__aarch64__) + { IMPL(4, 4, 3, 3, arm_fp32_4x4_3x3, Unpadded), MethodConstraints::LargerShape }, + { IMPL(2, 2, 3, 3, arm_fp32_2x2_3x3, Unpadded) }, + { IMPL(2, 2, 5, 5, 
arm_fp32_2x2_5x5, Unpadded) }, + { IMPL(1, 6, 1, 3, arm_fp32_1x6_1x3, Unpadded) }, + { IMPL_T(6, 1, 3, 1, arm_fp32_1x6_1x3, Unpadded) }, + { IMPL(1, 4, 1, 5, arm_fp32_1x4_1x5, Unpadded) }, + { IMPL_T(4, 1, 5, 1, arm_fp32_1x4_1x5, Unpadded) }, + { IMPL(1, 2, 1, 7, arm_fp32_1x2_1x7, Unpadded) }, + { IMPL_T(2, 1, 7, 1, arm_fp32_1x2_1x7, Unpadded) }, + { nullptr } +}; + +template <> +const TransformImplementation<float> *implementation_list(void) +{ + return transforms_fp32; +} + +} // namespace output_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/padding.cpp b/src/core/NEON/kernels/convolution/winograd/padding.cpp deleted file mode 100644 index 1d44c384d9..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/padding.cpp +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include <cstring> -#include <cstdint> - -#include "padding.hpp" - -namespace padding -{ - -template <typename T> -void copy_and_pad_tile( - const unsigned int tile_rows, - const unsigned int tile_cols, - const unsigned int n_channels, - const T* const inptr, - const unsigned int in_row_stride, - const unsigned int in_col_stride, - T* const outptr, - const unsigned int out_row_stride, - const unsigned int out_col_stride, - const unsigned int pad_top, - const unsigned int pad_left, - const unsigned int pad_bottom, - const unsigned int pad_right, - const T pad_value -) -{ - for (unsigned int out_i = 0; out_i < tile_rows; out_i++) - { - for (unsigned int out_j = 0; out_j < tile_cols; out_j++) - { - T* const output = outptr + out_i*out_row_stride + out_j*out_col_stride; - - if (out_i < pad_top || tile_rows - pad_bottom <= out_i || - out_j < pad_left || tile_cols - pad_right <= out_j) - { - for (unsigned int n = 0; n < n_channels; n++) - { - output[n] = pad_value; - } - } - else - { - const auto in_i = out_i - pad_top, in_j = out_j - pad_left; - const T* const input = inptr + in_i*in_row_stride + in_j*in_col_stride; - std::memcpy(output, input, n_channels * sizeof(T)); - } - } - } -} - -template void copy_and_pad_tile( - unsigned int, unsigned int, unsigned int, - const uint8_t *, unsigned int, unsigned int, - uint8_t *, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, unsigned int, uint8_t -); - -template void copy_and_pad_tile( - unsigned int, unsigned int, unsigned int, - const float *, unsigned int, unsigned int, - float *, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, unsigned int, float -); - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 
-template void copy_and_pad_tile( - unsigned int, unsigned int, unsigned int, - const __fp16 *, unsigned int, unsigned int, - __fp16 *, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, unsigned int, __fp16 -); -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - -template <unsigned int TileRows, unsigned int TileCols> -void CopyCropped<TileRows, TileCols>::execute( - const size_t size, - const void * const inptr, - const size_t in_row_stride, - const size_t in_col_stride, - void * const outptr, - const size_t out_row_stride, - const size_t out_col_stride, - const unsigned int pad_top, - const unsigned int pad_left, - const unsigned int pad_bottom, - const unsigned int pad_right -) -{ - for (unsigned int out_i = 0, in_i = pad_top; in_i < TileRows - pad_bottom; out_i++, in_i++) - { - for (unsigned int out_j = 0, in_j = pad_left; in_j < TileCols - pad_right; out_j++, in_j++) - { - std::memcpy( - static_cast<uint8_t *>(outptr) + out_i*out_row_stride + out_j*out_col_stride, - static_cast<const uint8_t *>(inptr) + in_i*in_row_stride + in_j*in_col_stride, - size - ); - } - } -} - -template class CopyCropped<2, 2>; -template class CopyCropped<3, 3>; -template class CopyCropped<4, 4>; - -template <typename T> -void crop_and_copy_tile( - unsigned int tile_rows, - unsigned int tile_cols, - unsigned int n_channels, - const T *inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - T *outptr, - unsigned int out_row_stride, - unsigned int out_col_stride, - unsigned int crop_top, - unsigned int crop_left, - unsigned int crop_bottom, - unsigned int crop_right -) -{ - for (unsigned int out_i = 0, in_i = crop_top; in_i < tile_rows - crop_bottom; out_i++, in_i++) - { - for (unsigned int out_j = 0, in_j = crop_left; in_j < tile_cols - crop_right; out_j++, in_j++) - { - std::memcpy( - outptr + out_i*out_row_stride + out_j*out_col_stride, - inptr + in_i*in_row_stride + in_j*in_col_stride, - sizeof(T) * n_channels - ); - } - } -} - -template void 
crop_and_copy_tile( - unsigned int tile_rows, - unsigned int tile_cols, - unsigned int n_channels, - const float *inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - float *outptr, - unsigned int out_row_stride, - unsigned int out_col_stride, - unsigned int crop_top, - unsigned int crop_left, - unsigned int crop_bottom, - unsigned int crop_right -); - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template void crop_and_copy_tile( - unsigned int tile_rows, - unsigned int tile_cols, - unsigned int n_channels, - const __fp16 *inptr, - unsigned int in_row_stride, - unsigned int in_col_stride, - __fp16 *outptr, - unsigned int out_row_stride, - unsigned int out_col_stride, - unsigned int crop_top, - unsigned int crop_left, - unsigned int crop_bottom, - unsigned int crop_right -); -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -} // namespace padding diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transform.hpp b/src/core/NEON/kernels/convolution/winograd/weight_transform.hpp new file mode 100644 index 0000000000..5569bc1b89 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transform.hpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once + +#include "winograd.hpp" +#include <algorithm> +#include <functional> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +/* Driver class for the Winograd weight transforms. + */ +template <typename TIn, typename TOut=TIn> +class Transform : public ITransform +{ + using Kernel = std::function<void( + unsigned int n_channels, // Number of channels to transform + const TIn *inptr, size_t ld_in_row, size_t ld_in_col, + TOut *outptr, size_t ld_out_matrix + )>; + + const std::string m_name; + const unsigned int m_kernel_rows, m_kernel_cols; + const unsigned int m_transformed_tile_rows, m_transformed_tile_cols; + const Kernel m_kernel; + + void execute_internal( + const ConvolutionArgs &args, + const TIn *inptr, size_t ld_in_row, size_t ld_in_col, size_t ld_input_channel, + TOut *outptr, size_t ld_out_matrix, size_t ld_out_row, + unsigned int thread_id, unsigned int n_threads + ) const + { + // Stripe groups of input channels over threads, this should reduce false + // sharing of the output matrix. 
+ constexpr auto n_input_channels_per_thread = 16u; + + // Get the initial offset for the input and output pointers + const auto offset = thread_id * n_input_channels_per_thread; + inptr += offset * ld_input_channel; + outptr += offset * ld_out_row; + + for (auto start_ic = thread_id * n_input_channels_per_thread; + start_ic < args.n_input_channels; + start_ic += n_threads * n_input_channels_per_thread) + { + // Now iterate over the input channels assigned to this thread. + const auto end_ic = std::min(args.n_input_channels, + start_ic + n_input_channels_per_thread); + for (auto ic = start_ic; ic < end_ic; ic++) + { + m_kernel(args.n_output_channels, inptr, ld_in_row, ld_in_col, + outptr, ld_out_matrix); + inptr += ld_input_channel; + outptr += ld_out_row; + } + + // Progress the pointers to the account for the work not performed by + // this thread. + const auto skip = (n_threads - 1) * n_input_channels_per_thread; + inptr += skip * ld_input_channel; + outptr += skip * ld_out_row; + } + } + + public: + Transform( + const std::string &name, + unsigned int kernel_rows, unsigned int kernel_cols, + unsigned int transformed_tile_rows, unsigned int transformed_tile_cols, + const Kernel kernel + ) + : m_name(name), + m_kernel_rows(kernel_rows), m_kernel_cols(kernel_cols), + m_transformed_tile_rows(transformed_tile_rows), m_transformed_tile_cols(transformed_tile_cols), + m_kernel(kernel) + { + } + + const std::string &get_name(void) const override { return m_name; } + + unsigned int get_kernel_rows(void) const override { return m_kernel_rows; } + unsigned int get_kernel_cols(void) const override { return m_kernel_cols; } + + unsigned int get_transformed_tile_rows(void) const override { return m_transformed_tile_rows; } + unsigned int get_transformed_tile_cols(void) const override { return m_transformed_tile_cols; } + + void execute( + const ConvolutionArgs &args, + const void *inptr, size_t ld_in_row, size_t ld_in_col, size_t ld_input_channel, + void *outptr, size_t 
ld_out_matrix, size_t ld_out_row, + unsigned int thread_id, unsigned int n_threads + ) const override + { + execute_internal( + args, + reinterpret_cast<const TIn *>(inptr), ld_in_row, ld_in_col, ld_input_channel, + reinterpret_cast<TOut *>(outptr), ld_out_matrix, ld_out_row, + thread_id, n_threads + ); + } + + /* Utility method to get a transposed variant of a kernel, this transposed + * version simply calls the original kernel with the input row and column + * strides swapped. + */ + static constexpr Kernel get_transposed_kernel(const Kernel &kernel) + { + return [kernel] ( + const unsigned int n_channels, + const TIn *const inptr, const size_t ld_in_row, const size_t ld_in_col, + TOut *const outptr, const size_t ld_out + ) { + kernel(n_channels, inptr, ld_in_col, ld_in_row, outptr, ld_out); + }; + } +}; + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/a64_fp16_4x4_3x3.cpp index 3101865027..0d9a65890e 100644 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp16_fp16_integers.cpp +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/a64_fp16_4x4_3x3.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,45 +21,26 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const __fp16* const input, // NOTE: Data in HWIO order - __fp16* const output, - const int matrix_stride, - const int matrix_row_stride +#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +#include <cstddef> +#include <arm_neon.h> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void a64_fp16_4x4_3x3( + unsigned int n_channels, + const __fp16* inptr, // NOTE: Data in HWIO order + const size_t ld_weight_row, + const size_t ld_weight_col, + __fp16* outptr, + const size_t matrix_stride ) { - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const auto weight_row_stride = 3 * weight_col_stride; - const __fp16 *inptrs[3][3]; - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride; - } - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - __fp16 *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; #ifdef __aarch64__ - for (; channels_remaining >= 8; channels_remaining -= 8) + for (; n_channels >= 8; n_channels -= 8) { // Matrices used and computed in this kernel float16x8_t w[3][3], Ww[6][3], V[6][6]; @@ -69,8 +50,7 @@ void WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>::execu { for (int j = 0; j < 3; j++) { - w[i][j] = vld1q_f16(inptrs[i][j]); - inptrs[i][j] += 8; + w[i][j] = vld1q_f16(inptr + i*ld_weight_row + j*ld_weight_col); } } @@ -128,11 +108,12 @@ void WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>::execu vst1q_f16(outptr + m*matrix_stride, 
V[i][j]); } } + inptr += 8; outptr += 8; } #endif // __aarch64__ #ifdef __arm_any__ - for (; channels_remaining >= 4; channels_remaining -= 4) + for (; n_channels >= 4; n_channels -= 4) { // Matrices used and computed in this kernel float16x4_t w[3][3], Ww[6][3], V[6][6]; @@ -142,8 +123,7 @@ void WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>::execu { for (int j = 0; j < 3; j++) { - w[i][j] = vld1_f16(inptrs[i][j]); - inptrs[i][j] += 4; + w[i][j] = vld1_f16(inptr + i*ld_weight_row + j*ld_weight_col); } } @@ -201,59 +181,62 @@ void WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>::execu vst1_f16(outptr + m*matrix_stride, V[i][j]); } } + inptr += 4; outptr += 4; } #endif // __arm_any__ - for (; channels_remaining; channels_remaining--) + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + __fp16 w[3][3], Ww[6][3], V[6][6]; + + // Read weights + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) { - // Matrices used and computed in this kernel - __fp16 w[3][3], Ww[6][3], V[6][6]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = *(inptrs[i][j]++); - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - Ww[0][j] = 6*w[0][j]; - Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; - Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; - Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; - Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; - Ww[5][j] = 24*w[2][j]; - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - V[i][0] = ( 6*Ww[i][0]) / 576.0; - V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0; - V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0; - V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0; - V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0; - V[i][5] = (24*Ww[i][2]) / 576.0; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; 
j < 6; j++, m++) - { - *(outptr + m*matrix_stride) = V[i][j]; - } - } - outptr++; + w[i][j] = *(inptr + i*ld_weight_row + j*ld_weight_col); } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + Ww[0][j] = 6*w[0][j]; + Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; + Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; + Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; + Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; + Ww[5][j] = 24*w[2][j]; + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + V[i][0] = ( 6*Ww[i][0]) / 576.0; + V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0; + V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0; + V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0; + V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0; + V[i][5] = (24*Ww[i][2]) / 576.0; + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + *(outptr + m*matrix_stride) = V[i][j]; + } + } + + inptr++; + outptr++; } } -template class WeightTransform<3, 3, 6, 6, __fp16, __fp16, WinogradRoots::Integers>; +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv -} // namespace -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_2x2_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_2x2_3x3.cpp new file mode 100644 index 0000000000..ebfe03e6d9 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_2x2_3x3.cpp @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2022 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <cstddef> +#include <arm_neon.h> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void arm_fp32_2x2_3x3( + unsigned int n_channels, + const float *inptr, size_t ld_weight_row, size_t ld_weight_col, + float *outptr, size_t matrix_stride +) +{ + constexpr auto inner_tile_i = 4u; + constexpr auto inner_tile_j = 4u; + +#ifdef __aarch64__ + // For each output channel + for (; n_channels >= 4u; n_channels -= 4) + { + // Matrices used and computed in this kernel + float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j]; + + // Read weights + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) + { + w[i][j] = vld1q_f32(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + Ww[0][j] = w[0][j]; + + // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]); + Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); + + // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]); + Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); + + Ww[3][j] = w[2][j]; + } + + // Compute V = W w WT + for (auto i = 0u; i < inner_tile_i; i++) + { + V[i][0] = Ww[i][0]; + + // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]); + V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); + + // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]); + V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); + + V[i][3] = Ww[i][2]; + } + + // Store the transformed weights + for (auto i = 0u, m = 0u; i < inner_tile_i; i++) + { + for (auto j = 0u; j < inner_tile_j; j++, m++) + { + vst1q_f32(outptr + m*matrix_stride, V[i][j]); + } + } + + inptr += 4; + outptr += 4; + } +#endif // __aarch64__ + for (; n_channels >= 2u; n_channels -= 2) + { + // Matrices used and computed in this kernel + float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j]; + + // Read weights + for (int i = 0; i < 3; 
i++) + { + for (int j = 0; j < 3; j++) + { + w[i][j] = vld1_f32(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + Ww[0][j] = w[0][j]; + + // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]); + Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); + + // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]); + Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); + + Ww[3][j] = w[2][j]; + } + + // Compute V = W w WT + for (auto i = 0u; i < inner_tile_i; i++) + { + V[i][0] = Ww[i][0]; + + // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]); + V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); + + // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]); + V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); + + V[i][3] = Ww[i][2]; + } + + // Store the transformed weights + for (auto i = 0u, m = 0u; i < inner_tile_i; i++) + { + for (auto j = 0u; j < inner_tile_j; j++, m++) + { + vst1_f32(outptr + m*matrix_stride, V[i][j]); + } + } + + inptr += 2; + outptr += 2; + } + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j]; + + // Read weights + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) + { + w[i][j] = *(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + Ww[0][j] = w[0][j]; + Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]); + Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]); + Ww[3][j] = w[2][j]; + } + + // Compute V = W w WT + for (auto i = 0u; i < inner_tile_i; i++) + { + V[i][0] = Ww[i][0]; + V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]); + V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]); + V[i][3] = Ww[i][2]; + } + + // Store the transformed weights + for (auto i = 0u, m = 0u; i < inner_tile_i; i++) + { + for (auto j = 0u; j < inner_tile_j; j++, m++) + 
{ + *(outptr + m*matrix_stride) = V[i][j]; + } + } + + inptr++; + outptr++; + } +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_2x2_5x5.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_2x2_5x5.cpp new file mode 100644 index 0000000000..3b09218646 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_2x2_5x5.cpp @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <cstddef> +#include <arm_neon.h> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void arm_fp32_2x2_5x5( + unsigned int n_channels, + const float *inptr, const size_t ld_weight_row, const size_t ld_weight_col, + float *outptr, const size_t matrix_stride +) +{ +#ifdef __aarch64__ + // For each output channel + for (; n_channels >= 4; n_channels -= 4) + { + // Matrices used and computed in this kernel + float32x4_t w[5][5], Ww[6][5], V[6][6]; + + // Read weights + for (int i = 0; i < 5; i++) + { + for (int j = 0; j < 5; j++) + { + w[i][j] = vld1q_f32(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 5; j++) + { + // Ww[0][j] = w[0][j]/4.0f; + Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f); + + // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f; + Ww[1][j] = vmulq_n_f32( + vaddq_f32( + vaddq_f32( + vaddq_f32(w[1][j], w[0][j]), + vaddq_f32(w[3][j], w[2][j]) + ), + w[4][j] + ), + -1.0f/6.0f + ); + + // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f; + // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f; + Ww[2][j] = vmulq_n_f32( + vsubq_f32( + vaddq_f32( + vsubq_f32(w[1][j], w[0][j]), + vsubq_f32(w[3][j], w[2][j]) + ), + w[4][j] + ), + 1.0f/6.0f + ); + + // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f; + Ww[3][j] = vmulq_n_f32( + vmlaq_n_f32( + vaddq_f32( + vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)), + vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) + ), + w[4][j], 2.0f + ), + 1.0f/3.0f + ); + + // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f; + Ww[4][j] = vmulq_n_f32( + vmlaq_n_f32( + vaddq_f32( + vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)), + vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) + ), + w[4][j], 2.0f + ), + 1.0f/3.0f + ); + + // Ww[5][j] = w[4][j]; + 
Ww[5][j] = w[4][j]; + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + // V[i][0] = Ww[i][0]/4.0f; + V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f); + + // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f; + V[i][1] = vmulq_n_f32( + vaddq_f32( + vaddq_f32( + vaddq_f32(Ww[i][1], Ww[i][0]), + vaddq_f32(Ww[i][3], Ww[i][2]) + ), + Ww[i][4] + ), + -1.0f/6.0f + ); + + // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f; + // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f; + V[i][2] = vmulq_n_f32( + vsubq_f32( + vaddq_f32( + vsubq_f32(Ww[i][1], Ww[i][0]), + vsubq_f32(Ww[i][3], Ww[i][2]) + ), + Ww[i][4] + ), + 1.0f/6.0f + ); + + // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f; + V[i][3] = vmulq_n_f32( + vmlaq_n_f32( + vaddq_f32( + vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)), + vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) + ), + Ww[i][4], 2.0f + ), + 1.0f/3.0f + ); + + // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f; + V[i][4] = vmulq_n_f32( + vmlaq_n_f32( + vaddq_f32( + vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)), + vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) + ), + Ww[i][4], 2.0f + ), + 1.0f/3.0f + ); + + // V[i][5] = Ww[i][4]; + V[i][5] = Ww[i][4]; + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + vst1q_f32(outptr + m*matrix_stride, V[i][j]); + } + } + + inptr += 4; + outptr += 4; + } +#endif // __aarch64__ + for (; n_channels >= 2; n_channels -= 2) + { + // Matrices used and computed in this kernel + float32x2_t w[5][5], Ww[6][5], V[6][6]; + + // Read weights + for (int i = 0; i < 5; i++) + { + for (int j = 0; j < 5; j++) + { + w[i][j] = vld1_f32(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 
5; j++) + { + // Ww[0][j] = w[0][j]/4.0f; + Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f); + + // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f; + Ww[1][j] = vmul_n_f32( + vadd_f32( + vadd_f32( + vadd_f32(w[1][j], w[0][j]), + vadd_f32(w[3][j], w[2][j]) + ), + w[4][j] + ), + -1.0f/6.0f + ); + + // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f; + // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f; + Ww[2][j] = vmul_n_f32( + vsub_f32( + vadd_f32( + vsub_f32(w[1][j], w[0][j]), + vsub_f32(w[3][j], w[2][j]) + ), + w[4][j] + ), + 1.0f/6.0f + ); + + // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f; + Ww[3][j] = vmul_n_f32( + vmla_n_f32( + vadd_f32( + vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)), + vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) + ), + w[4][j], 2.0f + ), + 1.0f/3.0f + ); + + // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f; + Ww[4][j] = vmul_n_f32( + vmla_n_f32( + vadd_f32( + vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)), + vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) + ), + w[4][j], 2.0f + ), + 1.0f/3.0f + ); + + // Ww[5][j] = w[4][j]; + Ww[5][j] = w[4][j]; + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + // V[i][0] = Ww[i][0]/4.0f; + V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f); + + // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f; + V[i][1] = vmul_n_f32( + vadd_f32( + vadd_f32( + vadd_f32(Ww[i][1], Ww[i][0]), + vadd_f32(Ww[i][3], Ww[i][2]) + ), + Ww[i][4] + ), + -1.0f/6.0f + ); + + // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f; + // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f; + V[i][2] = vmul_n_f32( + vsub_f32( + vadd_f32( + vsub_f32(Ww[i][1], Ww[i][0]), + vsub_f32(Ww[i][3], Ww[i][2]) + ), + Ww[i][4] + ), + 1.0f/6.0f + ); + + // V[i][3] = (Ww[i][0]/8.0f + 
Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f; + V[i][3] = vmul_n_f32( + vmla_n_f32( + vadd_f32( + vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)), + vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) + ), + Ww[i][4], 2.0f + ), + 1.0f/3.0f + ); + + // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f; + V[i][4] = vmul_n_f32( + vmla_n_f32( + vadd_f32( + vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)), + vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) + ), + Ww[i][4], 2.0f + ), + 1.0f/3.0f + ); + + // V[i][5] = Ww[i][4]; + V[i][5] = Ww[i][4]; + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + vst1_f32(outptr + m*matrix_stride, V[i][j]); + } + } + + inptr += 2; + outptr += 2; + } + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + float w[5][5], Ww[6][5], V[6][6]; + + // Read weights + for (int i = 0; i < 5; i++) + { + for (int j = 0; j < 5; j++) + { + w[i][j] = *(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 5; j++) + { + Ww[0][j] = w[0][j]/4.0f; + Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f; + Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f; + Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f; + Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f; + Ww[5][j] = w[4][j]; + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + V[i][0] = Ww[i][0]/4.0f; + V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f; + V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f; + V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f; + V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f; + 
V[i][5] = Ww[i][4]; + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + *(outptr + m*matrix_stride) = V[i][j]; + } + } + + inptr++; + outptr++; + } +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_4x4_3x3.cpp new file mode 100644 index 0000000000..aad88caff8 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/arm_fp32_4x4_3x3.cpp @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <cstddef> +#include <arm_neon.h> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void arm_fp32_4x4_3x3( + unsigned int n_channels, + const float *inptr, const size_t ld_weight_row, const size_t ld_weight_col, + float *outptr, const size_t matrix_stride +) +{ +#ifdef __aarch64__ + for (; n_channels >= 4; n_channels -= 4) + { + // Matrices used and computed in this kernel + float32x4_t w[3][3], Ww[6][3], V[6][6]; + + // Read weights + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) + { + w[i][j] = vld1q_f32(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + // Ww[0][j] = 6*w[0][j]; + Ww[0][j] = vmulq_n_f32(w[0][j], 6.0); + + // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; + Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0); + + // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; + Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0); + + // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; + Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); + + // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; + Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); + + // Ww[5][j] = 24*w[2][j]; + Ww[5][j] = vmulq_n_f32(w[2][j], 24.0f); + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + const float recip576 = 1.0f / 576.0f; + + // V[i][0] = 6*Ww[i][0]; + V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576); + + // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]; + V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576); + + // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]; + V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576); + + // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]; + V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], 
Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); + + // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]; + V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); + + // V[i][5] = 24*Ww[i][2]; + V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576); + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + vst1q_f32(outptr + m*matrix_stride, V[i][j]); + } + } + + inptr += 4; + outptr += 4; + } +#endif // __aarch64__ + for (; n_channels >= 2; n_channels -= 2) + { + // Matrices used and computed in this kernel + float32x2_t w[3][3], Ww[6][3], V[6][6]; + + // Read weights + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) + { + w[i][j] = vld1_f32(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + // Ww[0][j] = 6*w[0][j]; + Ww[0][j] = vmul_n_f32(w[0][j], 6.0); + + // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; + Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0); + + // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; + Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0); + + // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; + Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); + + // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; + Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); + + // Ww[5][j] = 24*w[2][j]; + Ww[5][j] = vmul_n_f32(w[2][j], 24.0f); + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + const float recip576 = 1.0f / 576.0f; + + // V[i][0] = 6*Ww[i][0]; + V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576); + + // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]; + V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576); + + // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]; + V[i][2] = 
vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576); + + // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]; + V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); + + // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]; + V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); + + // V[i][5] = 24*Ww[i][2]; + V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576); + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + vst1_f32(outptr + m*matrix_stride, V[i][j]); + } + } + + inptr += 2; + outptr += 2; + } + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + float w[3][3], Ww[6][3], V[6][6]; + + // Read weights + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) + { + w[i][j] = *(inptr + i*ld_weight_row + j*ld_weight_col); + } + } + + // Compute the matrix W w + for (int j = 0; j < 3; j++) + { + Ww[0][j] = 6*w[0][j]; + Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; + Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; + Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; + Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; + Ww[5][j] = 24*w[2][j]; + } + + // Compute V = W w WT + for (int i = 0; i < 6; i++) + { + V[i][0] = ( 6*Ww[i][0]) / 576.0; + V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0; + V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0; + V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0; + V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0; + V[i][5] = (24*Ww[i][2]) / 576.0; + } + + // Store the transformed weights + for (int i = 0, m = 0; i < 6; i++) + { + for (int j = 0; j < 6; j++, m++) + { + *(outptr + m*matrix_stride) = V[i][j]; + } + } + + inptr++; + outptr++; + } +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git 
a/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x2_1x7.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x2_1x7.cpp new file mode 100644 index 0000000000..ee657b01cd --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x2_1x7.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void cpp_fp32_1x2_1x7( + unsigned int n_channels, + const float* inptr, size_t, size_t ld_weight_col, + float *outptr, size_t matrix_stride +) +{ + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + float w[7], V[8]; + + // Read weights + for (int j = 0; j < 7; j++) + { + w[j] = *(inptr + j*ld_weight_col); + } + + // Compute V = w WT + V[0] = (w[0]*-1) / 36.0f; + V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f; + V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f; + V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f; + V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f; + V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f; + V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f; + V[7] = (w[6]*1) / 1.0f; + + // Store the transformed weights + for (int j = 0; j < 8; j++) + { + *(outptr + j*matrix_stride) = V[j]; + } + + inptr++; + outptr++; + } +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x4_1x5.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x4_1x5.cpp new file mode 100644 index 0000000000..47a85e306d --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x4_1x5.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void cpp_fp32_1x4_1x5( + unsigned int n_channels, + const float *inptr, + size_t, // ld_weight_row + size_t ld_weight_col, + float *outptr, + size_t matrix_stride +) +{ + constexpr auto kernel_cols = 5u, inner_tile_cols = 8u; + + // For each output channel + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + float w[kernel_cols], V[inner_tile_cols]; + + // Read weights + for (auto j = 0u; j < kernel_cols; j++) + { + w[j] = *(inptr + j * ld_weight_col); + } + + // Compute V = w WT + V[0] = (w[0]*-1) / 36; + V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48; + V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48; + V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120; + V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120; + V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720; + V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720; + V[7] = (w[4]*1) / 1; + + // Store the transformed weights + for (auto j = 0u; j < inner_tile_cols; j++) + { + *(outptr + j*matrix_stride) = V[j]; + } + + inptr++; + outptr++; + } +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x6_1x3.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x6_1x3.cpp new file mode 100644 index 0000000000..22bb85e788 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms/cpp_fp32_1x6_1x3.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2022 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <cstddef> + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void cpp_fp32_1x6_1x3( + unsigned int n_channels, + const float *inptr, size_t, size_t ld_weight_col, + float *outptr, size_t matrix_stride +) +{ + for (; n_channels; n_channels--) + { + // Matrices used and computed in this kernel + float w[3], V[8]; + + // Read weights + for (int j = 0; j < 3; j++) + { + w[j] = *(inptr + j * ld_weight_col); + } + + // Compute V = w WT + V[0] = (w[0]*-1) / 36.0f; + V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f; + V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f; + V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f; + V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f; + V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f; + V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f; + V[7] = (w[2]*1) / 1; + + // Store the transformed weights + for (int j = 0; j < 8; j++) + { + *(outptr + j*matrix_stride) = V[j]; + } + + inptr++; + outptr++; + } +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_qa8_qa8.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms_fp16.cpp index b09f620475..6c8bbe07cf 100644 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_qa8_qa8.cpp +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms_fp16.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,12 +21,34 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "impl_qa8_qa8.hpp" -namespace depthwise +#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +#include "winograd_implementations.hpp" +#include "weight_transform.hpp" + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +void *a64_fp16_4x4_3x3(unsigned int, const __fp16 *, size_t, size_t, __fp16 *, size_t); + +#define IMPL(KERN_ROWS, KERN_COLS, TRANS_ROWS, TRANS_COLS, KERN) \ + new Transform<__fp16>(#KERN, KERN_ROWS, KERN_COLS, TRANS_ROWS, TRANS_COLS, KERN) + +static const TransformImplementation<__fp16> transforms_fp16[] = { + { IMPL(3, 3, 6, 6, a64_fp16_4x4_3x3) }, + { nullptr } +}; + +template <> +const TransformImplementation<__fp16> *implementation_list(void) { -template class QAsymm8DepthwiseConvolution<2, 2, 3, 3, 1, 1>; -template class QAsymm8DepthwiseConvolution<2, 2, 3, 3, 2, 2>; -template class QAsymm8DepthwiseConvolution<2, 2, 5, 5, 1, 1>; -template class QAsymm8DepthwiseConvolution<2, 2, 5, 5, 2, 2>; -} // namespace depthwise + return transforms_fp16; +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv + +#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/convolution/winograd/weight_transforms_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/weight_transforms_fp32.cpp new file mode 100644 index 0000000000..d12f3c60c0 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/weight_transforms_fp32.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "winograd_implementations.hpp" +#include "weight_transform.hpp" + +namespace arm_conv { +namespace winograd { +namespace weight_transform { + +#if defined(__aarch64__) +#if defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) +void arm_fp32_4x4_3x3(unsigned int, const float *, size_t, size_t, float *, size_t); +void arm_fp32_2x2_3x3(unsigned int, const float *, size_t, size_t, float *, size_t); +void arm_fp32_2x2_5x5(unsigned int, const float *, size_t, size_t, float *, size_t); +void cpp_fp32_1x6_1x3(unsigned int, const float *, size_t, size_t, float *, size_t); +void cpp_fp32_1x4_1x5(unsigned int, const float *, size_t, size_t, float *, size_t); +void cpp_fp32_1x2_1x7(unsigned int, const float *, size_t, size_t, float *, size_t); + +#define IMPL(KERN_ROWS, KERN_COLS, TRANS_ROWS, TRANS_COLS, KERN) \ + new Transform<float>(#KERN, KERN_ROWS, KERN_COLS, TRANS_ROWS, TRANS_COLS, KERN) + +#define IMPL_T(KERN_ROWS, KERN_COLS, TRANS_ROWS, TRANS_COLS, KERN) \ + new Transform<float>(#KERN, KERN_ROWS, KERN_COLS, TRANS_ROWS, TRANS_COLS, Transform<float>::get_transposed_kernel(KERN)) + +static const TransformImplementation<float> transforms_fp32[] = { +#if defined(__aarch64__) +#if defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) + { IMPL(3, 3, 6, 6, arm_fp32_4x4_3x3) }, + { IMPL(3, 3, 4, 4, arm_fp32_2x2_3x3) }, + { IMPL(5, 5, 6, 6, arm_fp32_2x2_5x5) }, + { IMPL(1, 3, 1, 8, cpp_fp32_1x6_1x3) }, + { IMPL_T(3, 1, 8, 1, cpp_fp32_1x6_1x3) }, + { IMPL(1, 5, 1, 8, cpp_fp32_1x4_1x5) }, + { IMPL_T(5, 1, 8, 1, cpp_fp32_1x4_1x5) }, + { IMPL(1, 7, 1, 8, cpp_fp32_1x2_1x7) }, + { IMPL_T(7, 1, 8, 1, cpp_fp32_1x2_1x7) }, + { nullptr } +}; + +template <> +const TransformImplementation<float> *implementation_list(void) +{ + return transforms_fp32; +} + +} // namespace weight_transform +} // namespace winograd +} // namespace arm_conv diff --git 
a/src/core/NEON/kernels/convolution/winograd/winograd.cpp b/src/core/NEON/kernels/convolution/winograd/winograd.cpp deleted file mode 100644 index d556112853..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd.cpp +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include <cstring> -#include "utils.hpp" -#include "winograd.hpp" - -using namespace winograd; -using array2 = std::pair<unsigned int, unsigned int>; - -#define MEMBERFN(RTYPE) \ - template <int output_tile_rows, int output_tile_cols, int kernel_rows, \ - int kernel_cols, WinogradRoots roots> \ - template <typename TOut, typename TIn, typename TGEMMIn, typename TGEMMOut> \ - RTYPE WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, \ - kernel_cols, \ - roots>::Convolution<TOut, TIn, TGEMMIn, TGEMMOut> - -/** Get the output shape of a convolution. */ -MEMBERFN(array2) -::get_output_shape(const std::pair<unsigned int, unsigned int> input_shape, - const bool padding_same) { - const unsigned int n_rows = - padding_same ? input_shape.first : input_shape.first - (kernel_rows - 1); - const unsigned int n_cols = padding_same - ? input_shape.second - : input_shape.second - (kernel_cols - 1); - return {n_rows, n_cols}; -} - -/** Get the memory required to store the kernel transformed into the - * Winograd domain. - */ -MEMBERFN(size_t) -::get_kernel_storage_size(const unsigned int n_input_channels, - const unsigned int n_output_channels) { - return N_GEMMS * get_kernel_matrix_size(n_input_channels, n_output_channels); -} - -MEMBERFN(size_t) -::get_input_storage_size(const unsigned int n_batches, - const unsigned int n_rows, const unsigned int n_cols, - const unsigned int n_channels, - const bool same_padding) { - return N_GEMMS * get_input_matrix_size(n_batches, n_rows, n_cols, n_channels, - same_padding); -} - -MEMBERFN(size_t) -::get_output_storage_size(const unsigned int n_batches, - const unsigned int n_rows, const unsigned int n_cols, - const unsigned int n_channels) { - return N_GEMMS * - get_output_matrix_size(n_batches, n_rows, n_cols, n_channels); -} - -/** Get the memory required to apply a Winograd operator to some input. 
- */ -MEMBERFN(size_t) -::get_working_space_size(const unsigned int n_batches, - const unsigned int n_rows, const unsigned int n_cols, - const unsigned int n_input_channels, - const unsigned int n_output_channels, - const bool padding_same) { - const auto output_shape = get_output_shape({n_rows, n_cols}, padding_same); - - // Get the memory required to store the matrices - const size_t matrix_sizes = - N_GEMMS * - (get_input_matrix_size(n_batches, n_rows, n_cols, n_input_channels, - padding_same) + - get_output_matrix_size(n_batches, output_shape.first, - output_shape.second, n_output_channels)); - return matrix_sizes; -} - -/* Get the memory required by a single "input" matrix. - */ -MEMBERFN(size_t) -::get_input_matrix_size(const unsigned int n_batches, const unsigned int n_rows, - const unsigned int n_cols, - const unsigned int n_channels, - const bool same_padding) { - return get_input_matrix_stride(n_batches, n_rows, n_cols, n_channels, - same_padding) * - sizeof(TGEMMIn); -} - -MEMBERFN(int) -::get_input_matrix_stride(const unsigned int n_batches, const unsigned int n_rows, - const unsigned int n_cols, - const unsigned int n_channels, - const bool same_padding) { - const auto output_shape = get_output_shape({n_rows, n_cols}, same_padding); - const unsigned int tile_rows = iceildiv(output_shape.first, output_tile_rows); - const unsigned int tile_cols = - iceildiv(output_shape.second, output_tile_cols); - const unsigned int M = - roundup<unsigned int>(n_batches * tile_rows * tile_cols, M_BLOCK); - const unsigned int K = n_channels; - - return M * K; -} - -/* Get the memory required by a single "output" matrix. 
- */ -MEMBERFN(size_t) -::get_output_matrix_size(const unsigned int n_batches, - const unsigned int n_rows, const unsigned int n_cols, - const unsigned int n_channels) { - return get_output_matrix_stride(n_batches, n_rows, n_cols, n_channels) * - sizeof(TGEMMOut); -} - -MEMBERFN(int) -::get_output_matrix_stride(const unsigned int n_batches, - const unsigned int n_rows, const unsigned int n_cols, - const unsigned int n_channels) { - // Compute shape for the GEMM - const int tile_rows = iceildiv(n_rows, output_tile_rows); - const int tile_cols = iceildiv(n_cols, output_tile_cols); - const int M = roundup<int>(tile_rows * tile_cols, M_BLOCK); - const int N = roundup<int>(n_channels, N_BLOCK); - - return n_batches * M * N; -} - - -/* Get the memory required by a single "kernel" matrix. - */ -MEMBERFN(size_t) -::get_kernel_matrix_size(const unsigned int n_input_channels, - const unsigned int n_output_channels) { - return sizeof(TGEMMIn) * - get_kernel_matrix_stride(n_input_channels, n_output_channels); -} - -MEMBERFN(int) -::get_kernel_matrix_stride(const unsigned int n_input_channels, - const unsigned int n_output_channels) { - return n_input_channels * roundup<int>(n_output_channels, N_BLOCK); -} - -// Instantiate required implementations -template class WinogradGEMM<2, 2, 3, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>; -template class WinogradGEMM<4, 4, 3, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>; - -template class WinogradGEMM<1, 6, 1, 3, WinogradRoots::Integers>::Convolution<float, float, float, float>; -template class WinogradGEMM<6, 1, 3, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>; - -template class WinogradGEMM<2, 2, 5, 5, WinogradRoots::Integers>::Convolution<float, float, float, float>; - -template class WinogradGEMM<1, 4, 1, 5, WinogradRoots::Integers>::Convolution<float, float, float, float>; -template class WinogradGEMM<4, 1, 5, 1, WinogradRoots::Integers>::Convolution<float, 
float, float, float>; - -template class WinogradGEMM<1, 2, 1, 7, WinogradRoots::Integers>::Convolution<float, float, float, float>; -template class WinogradGEMM<2, 1, 7, 1, WinogradRoots::Integers>::Convolution<float, float, float, float>; - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template class WinogradGEMM<4, 4, 3, 3, WinogradRoots::Integers>::Convolution<__fp16, __fp16, __fp16, __fp16>; -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC diff --git a/src/core/NEON/kernels/convolution/winograd/winograd.hpp b/src/core/NEON/kernels/convolution/winograd/winograd.hpp deleted file mode 100644 index ac82e7b7b9..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd.hpp +++ /dev/null @@ -1,621 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include "arm_gemm.hpp" - -#include <cstddef> -#include <utility> - -namespace winograd -{ - -class ITransform -{ - public: - virtual ~ITransform() = default; - - /** - * Get the working space required to perform the transformation. - * - * Note, the working space is only required when performing the - * transformation - hence it can be reused whenever the transformation is - * not running. - * - * @param nthreads The greatest number of threads that will be used to execute the transform. - * @return Size of working space required in bytes. - */ - virtual size_t get_working_space_size(unsigned int nthreads=1) const = 0; - - /** - * Set the working space to be used by the transformation. - * - * Note, the working space is only required when performing the - * transformation - hence it can be reused whenever the transformation is - * not running. - * - * @param Pointer to the working space. - */ - virtual void set_working_space(void *buffer) = 0; - - /** - * Get the window of work a given operator can perform. - */ - virtual unsigned int get_window() const = 0; - - /** - * Perform work upon a window of the transform. - */ - virtual void run(unsigned int start, unsigned int stop, unsigned int threadid=0) = 0; -}; - -class IInputTransform : public ITransform -{ - public: - virtual ~IInputTransform() = default; - - /** - * Set the pointer to the (NHWC-ordered) tensor to be transformed. - */ - virtual void set_input_tensor(const void *input) = 0; - - /** - * Set the pointer to the (NHWC-ordered) tensor to be transformed. - * @param col_stride Stride between columns of the tensor, measured in elements (not bytes). - */ - virtual void set_input_tensor(const void *input, int col_stride) = 0; - - /** - * Set the pointer to the (NHWC-ordered) tensor to be transformed. - * @param row_stride Stride between rows of the tensor, measured in elements (not bytes). - * @param col_stride Stride between columns of the tensor, measured in elements (not bytes). 
- */ - virtual void set_input_tensor(const void *input, int row_stride, int col_stride) = 0; - - /** - * Set the pointer to the (NHWC-ordered) tensor to be transformed. - * @param batch_stride Stride between batches of the tensor, measured in elements (not bytes). - * @param row_stride Stride between rows of the tensor, measured in elements (not bytes). - * @param col_stride Stride between columns of the tensor, measured in elements (not bytes). - */ - virtual void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) = 0; - - /** - * Set pointers to the matrices written by the transform. - * @param matrices Pointer to the start of the first matrix representing the transformed input. - * @param inter_matrix_stride Stride (in elements) between matrices. - * @param matrix_row_stride Stride (in elements) between the rows within a single matrix. - */ - virtual void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0; -}; - -class IOutputTransform : public ITransform -{ - public: - virtual ~IOutputTransform() = default; - - /** - * Set pointers to the matrices written by the transform. - * @param matrices Pointer to the start of the first matrix representing the input to the transform. - * @param inter_matrix_stride Stride (in elements) between matrices. - * @param matrix_row_stride Stride (in elements) between the rows within a single matrix. - */ - virtual void set_input_matrices(const void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0; - - /** - * Set pointer to the bias tensor (can be ignored or called with nullptr for no bias. - */ - virtual void set_bias(const void *bias=nullptr) = 0; - - /** - * Set pointer to the output tensor produced by the transform. - */ - virtual void set_output_tensor(void *output) = 0; - - /** - * Set pointer to the output tensor produced by the transform. - * @param col_stride Stride between columns of the tensor, measured in elements (not bytes). 
- */ - virtual void set_output_tensor(void *output, int col_stride) = 0; - - /** - * Set pointer to the output tensor produced by the transform. - * @param row_stride Stride between rows of the tensor, measured in elements (not bytes). - * @param col_stride Stride between columns of the tensor, measured in elements (not bytes). - */ - virtual void set_output_tensor(void *output, int row_stride, int col_stride) = 0; - - /** - * Set pointer to the output tensor produced by the transform. - * @param batch_stride Stride between batches of the tensor, measured in elements (not bytes). - * @param row_stride Stride between rows of the tensor, measured in elements (not bytes). - * @param col_stride Stride between columns of the tensor, measured in elements (not bytes). - */ - virtual void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) = 0; -}; - -class IWeightTransform : public ITransform -{ - public: - virtual ~IWeightTransform() = default; - - /** Set pointer to the weight tensor read by the transform. */ - virtual void set_weight_tensor(const void *weights) = 0; - - /** - * Set pointers to the matrices written by the transform. - * @param matrices Pointer to the start of the first matrix representing the transformed input. - * @param inter_matrix_stride Stride (in elements) between matrices. - * @param matrix_row_stride Stride (in elements) between the rows within a single matrix. - */ - virtual void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) = 0; -}; - -enum class WinogradRoots -{ - Integers, -}; - -template <int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots> -class InputTransform : public IInputTransform -{ - public: - /** Create an InputTransform operator fixed on a given problem and set of - * pointers. 
- */ - InputTransform( - int kernel_rows, /**< Number of rows in the kernel */ - int kernel_cols, /**< Number of columns in the kernel */ - int n_batches, /**< Number of batches in input tensor. */ - int n_rows, /**< Number of rows in input tensor. */ - int n_cols, /**< Number of columns in input tensor. */ - int n_channels, /**< Number of channels in input tensor. */ - int padding_top, /**< Padding to apply to the top of the image. */ - int padding_left, /**< Padding to apply to the left of the image. */ - int padding_bottom, /**< Padding to apply to the bottom of the image. */ - int padding_right /**< Padding to apply to the right of the image. */ - ); - - InputTransform(InputTransform&) = delete; - InputTransform operator=(InputTransform&) = delete; - - /** Set pointers to the input tensor read by the transform. */ - void set_input_tensor(const void *input) override; - void set_input_tensor(const void *input, int col_stride) override; - void set_input_tensor(const void *input, int row_stride, int col_stride) override; - void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) override; - - /** Set pointers to the matrices written by the transform. */ - void set_output_matrices(void *matrices, int iter_matrix_stride, int matrix_row_stride) override; - - /** Get the working space required to perform the transformation. */ - size_t get_working_space_size(unsigned int nthreads=1) const override; - void set_working_space(void *buffer) override; - - /** Get the window of work a given operator can perform. */ - unsigned int get_window() const override; - static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window - - /** Perform work upon a window of the input. 
*/ - void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override; - - protected: - const int _n_batches, _n_rows, _n_cols, _n_channels; - - private: - void transform_unpadded_tile( - unsigned int threadid, - int n_channels, - TOut *outptr, - const TIn *inptr - ); - - void transform_padded_tile( - unsigned int threadid, - int n_channels, - TOut *outptr, - const TIn *inptr, - int padding_top, - int padding_left, - int padding_bottom, - int padding_right - ); - - /* Tile implementation */ - static void transform_tile( - int n_channels, /** @param[in] Number of channels in the tensor. */ - const TIn* inptr_base, /** @param[in] Pointer to the base of the input tile. */ - int input_row_stride, /** @param[in] Stride between rows of the input tensor. */ - int input_col_stride, /** @param[in] Stride between columns of the input tensor. */ - TOut* mptr_base, /** @param[out] Base pointer to transformed input matrices. */ - int matrix_stride /** @param[in] Stride between matrices in the input space. */ - ); - - /** Get the working space for a thread. */ - void * get_working_space(unsigned int threadid) const; - - const TIn* _inptr; - TOut* _outptr; - - const int _overlap_rows, _overlap_cols; - const int _padding_top, _padding_left, _padding_bottom, _padding_right; - const int _tiles_M, _tiles_N; - int _matrix_stride, _matrix_row_stride, _matrix_batch_stride; - int _in_col_stride, _in_row_stride, _in_batch_stride; - - const int _working_space_col_stride, _working_space_row_stride; - TIn *_working_space; -}; - -template <int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots> -class InputTransform<InnerTileRows, 1, TIn, TOut, Roots> : - public InputTransform<1, InnerTileRows, TIn, TOut, Roots> -{ - using Base = InputTransform<1, InnerTileRows, TIn, TOut, Roots>; - - public: - InputTransform( - int kernel_rows, /**< Number of rows in the kernel. */ - int kernel_cols, /**< Number of columns in the kernel. 
*/ - int n_batches, /**< Number of batches in input tensor. */ - int n_rows, /**< Number of rows in input tensor. */ - int n_cols, /**< Number of columns in input tensor. */ - int n_channels, /**< Number of channels in input tensor. */ - int padding_top, /**< Padding to apply to the top of the image. */ - int padding_left, /**< Padding to apply to the left of the image. */ - int padding_bottom, /**< Padding to apply to the bottom of the image. */ - int padding_right /**< Padding to apply to the right of the image. */ - ); - - /** Set pointers to the input tensor read by the transform. */ - void set_input_tensor(const void *input) override; - void set_input_tensor(const void *input, int col_stride) override; - void set_input_tensor(const void *input, int row_stride, int col_stride) override; - void set_input_tensor(const void *input, int batch_stride, int row_stride, int col_stride) override; -}; - -template < - int KernelRows, int KernelCols, - int InnerTileRows, int InnerTileCols, - typename TIn, typename TOut, - WinogradRoots Roots -> -class OutputTransform : public IOutputTransform -{ - public: - OutputTransform( - int n_batches, /**< Number of batches in output tensor. */ - int n_rows, /**< Number of rows in output tensor. */ - int n_cols, /**< Number of columns in output tensor. */ - int n_channels, /**< Number of channels in output tensor. */ - const arm_gemm::Activation &activation - ); - - OutputTransform(OutputTransform&) = delete; - OutputTransform operator=(OutputTransform&) = delete; - - /** Set pointers to the matrices read by the transform. */ - void set_input_matrices(const void *matrices, int iter_matrix_stride, int matrix_row_stride) override; - - /** Set pointer to the bias tensor (can be ignored or called with nullptr for no bias */ - void set_bias(const void *bias=nullptr) override; - - /** Set pointers to the output tensor written by the transform. 
*/ - void set_output_tensor(void *output) override; - void set_output_tensor(void *output, int col_stride) override; - void set_output_tensor(void *output, int row_stride, int col_stride) override; - void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) override; - - /** Get the working space required to perform the transformation. */ - size_t get_working_space_size(unsigned int nthreads=1) const override; - void set_working_space(void *buffer) override; - - /** Get the window of work a given operator can perform. */ - unsigned int get_window() const override; - static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window - - /** Perform work upon a window of the input. */ - void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override; - - protected: - static constexpr int inner_tile_rows = InnerTileRows; - static constexpr int inner_tile_cols = InnerTileCols; - static constexpr int output_tile_rows = InnerTileRows - KernelRows + 1; - static constexpr int output_tile_cols = InnerTileCols - KernelCols + 1; - - const int _n_batches, _n_rows, _n_cols, _n_channels; - const TOut _output_min, _output_max; - - private: - void transform_uncropped_tile( - unsigned int threadid, - int n_channels, - TOut *outptr, - const TIn *inptr, - const TOut *biases - ); - - void transform_cropped_tile( - unsigned int threadid, - int n_channels, - TOut *outptr, - const TIn *inptr, - const TOut *biases, - int pad_bottom, - int pad_right - ); - - /** Implementation of the tile transformation method. */ - static void transform_tile( - int n_channels, - const TIn* matrix_base, - int matrix_stride, - const TOut* biases, - TOut* output, - int output_row_stride, - int output_col_stride, - TOut output_min, - TOut output_max - ); - - /** Get the working space for a thread. 
*/ - void * get_working_space(unsigned int threadid) const; - - const TIn* _matrix_base; - const TOut* _biases; - int _matrix_stride, _matrix_row_stride, _matrix_batch_stride; - TOut* _outptr; - const int _tiles_M, _tiles_N; - int _out_col_stride, _out_row_stride, _out_batch_stride; - - const int _working_space_col_stride, _working_space_row_stride; - TOut *_working_space; -}; - -template < - int KernelRows, - int InnerTileRows, - typename TIn, typename TOut, - WinogradRoots Roots -> -class OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> : - public OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots> -{ - using Base = OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>; - - public: - OutputTransform( - int n_batches, /**< Number of batches in output tensor. */ - int n_rows, /**< Number of rows in output tensor. */ - int n_cols, /**< Number of columns in output tensor. */ - int n_channels, /**< Number of channels in output tensor. */ - const arm_gemm::Activation &activation - ); - - /** Set pointers to the output tensor written by the transform. */ - void set_output_tensor(void *output) override; - void set_output_tensor(void *output, int col_stride) override; - void set_output_tensor(void *output, int row_stride, int col_stride) override; - void set_output_tensor(void *output, int batch_stride, int row_stride, int col_stride) override; -}; - -template < - int KernelRows, int KernelCols, - int InnerTileRows, int InnerTileCols, - typename TIn, typename TOut, - WinogradRoots Roots -> -class WeightTransform : public IWeightTransform -{ - public: - WeightTransform( - int n_output_channels, /**< Number of output channels in the kernel. */ - int n_input_channels /**< Number of input channels in the kernel. */ - ); - - WeightTransform(WeightTransform&) = delete; - WeightTransform operator=(WeightTransform&) = delete; - - /** Set pointer to the weight tensor read by the transform. 
*/ - void set_weight_tensor(const void *weights) override; - - /** Set pointer to the matrices written by the transform. */ - void set_output_matrices(void *matrices, int inter_matrix_stride, int matrix_row_stride) override; - - /** Get the working space required to perform the transformation. */ - size_t get_working_space_size(unsigned int nthreads=1) const override; - void set_working_space(void *buffer) override; - - /** Get the window of work a given operator can perform. */ - unsigned int get_window() const override; - static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window - - /** Perform work upon a window of the input. */ - void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override; - - protected: - static const int kernel_rows = KernelRows; - static const int kernel_cols = KernelCols; - static const int inner_tile_rows = InnerTileRows; - static const int inner_tile_cols = InnerTileCols; - - private: - /** Apply the transform to a tensor. */ - static void execute( - int n_output_channels, - int n_input_channels, - const TIn* input, - TOut* output, - int matrix_stride, - int matrix_row_stride - ); - - const int _n_output_channels, _n_input_channels; - TOut *_matrices; - int _matrix_stride, _matrix_row_stride; - const TIn *_weights; -}; - -template <int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots> -class WeightTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> : - public WeightTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots> -{ - public: - using WeightTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::WeightTransform; -}; - -template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols, WinogradRoots Roots> -class WinogradGEMM -{ - public: - // Information about the specific Winograd instance - static constexpr int output_tile_rows = OutputTileRows; - static constexpr int output_tile_cols = OutputTileCols; - static constexpr int 
kernel_rows = KernelRows; - static constexpr int kernel_cols = KernelCols; - static constexpr int inner_tile_rows = output_tile_rows + kernel_rows - 1; - static constexpr int inner_tile_cols = output_tile_cols + kernel_cols - 1; - static constexpr int N_GEMMS = inner_tile_rows * inner_tile_cols; - - /** Transform weights from the spatial to the Winograd domain. */ - template <typename TIn, typename TOut> - using WeightsTransform = WeightTransform< - KernelRows, KernelCols, inner_tile_rows, inner_tile_cols, - TIn, TOut, Roots - >; - - /** Transform input feature maps from the spatial to the Winograd domain. - */ - template <typename TIn, typename TOut> - using InputTransform = InputTransform< - inner_tile_rows, inner_tile_cols, TIn, TOut, Roots - >; - - /** Transform output feature maps from the Winograd to the spatial domain. - */ - template <typename TIn, typename TOut> - using OutputTransform = OutputTransform< - KernelRows, KernelCols, inner_tile_rows, inner_tile_cols, - TIn, TOut, Roots - >; - - /** Perform a convolution. - */ - template <typename TOut, typename TIn, typename TInGEMM=TIn, typename TOutGEMM=TOut> - class Convolution - { - public: - // Information about the typed Winograd instance - typedef TOut OutputType; - typedef TOutGEMM GemmOutputType; - typedef TInGEMM GemmInputType; - typedef TIn InputType; - - /** Get the output shape of a convolution. */ - static std::pair<unsigned int, unsigned int> get_output_shape( - const std::pair<unsigned int, unsigned int> input_shape, - bool padding_same); - - /** Get the memory required to store the kernel transformed into the - * Winograd domain. - */ - static size_t get_kernel_storage_size(unsigned int n_input_channels, - unsigned int n_output_channels); - - /** Get the memory required to store the input tensor transformed into - * the Winograd domain. 
- */ - static size_t get_input_storage_size( - unsigned int n_batches, // Number of batches - unsigned int n_rows, // Number of input rows - unsigned int n_cols, // Number of input columns - unsigned int n_channels, // Number of input channels - bool padding_same); - - /** Get the memory required to store the output tensor in the Winograd - * domain. - */ - static size_t get_output_storage_size( - unsigned int n_batches, // Number of batches - unsigned int n_rows, // Number of output rows - unsigned int n_cols, // Number of output columns - unsigned int n_channels // Number of output channels - ); - - /** Get the memory required to apply a Winograd operator to some input. - */ - static size_t get_working_space_size( - unsigned int n_batches, - unsigned int n_rows, // Number of input rows - unsigned int n_cols, // Number of input columns - unsigned int n_input_channels, // Number of input channels - unsigned int n_output_channels, // Number of output channels - bool padding_same); - - /* Get the memory required by a single "input" matrix. - */ - static size_t get_input_matrix_size( - unsigned int n_batches, // Number of batches - unsigned int n_rows, // Number of input rows - unsigned int n_cols, // Number of input columns - unsigned int n_channels, // Number of input channels - bool padding_same); - - static int get_input_matrix_stride( - unsigned int n_batches, // Number of batches - unsigned int n_rows, // Number of input rows - unsigned int n_cols, // Number of input columns - unsigned int n_channels, // Number of input channels - bool padding_same); - - /* Get the memory required by a single "output" matrix. 
- */ - static size_t get_output_matrix_size( - unsigned int n_batches, // Number of batches - unsigned int n_rows, // Number of output rows - unsigned int n_cols, // Number of output columns - unsigned int n_channels // Number of output channels - ); - - static int get_output_matrix_stride( - unsigned int n_batches, // Number of batches - unsigned int n_rows, // Number of output rows - unsigned int n_cols, // Number of output columns - unsigned int n_channels // Number of output channels - ); - - /* Get the memory required by a single "kernel" matrix. - */ - static size_t get_kernel_matrix_size(unsigned int n_input_channels, - unsigned int n_output_channels); - static int get_kernel_matrix_stride(unsigned int n_input_channels, - unsigned int n_output_channels); - - static constexpr int M_BLOCK = 4; /** Size of block used by GEMM. */ - static constexpr int N_BLOCK = 16; /** Size of block used by GEMM. */ - }; -}; - -} // namespace winograd diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_qs8_qs8.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_fp16.cpp index 1ae48b9417..e1ad9e458d 100644 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_qs8_qs8.cpp +++ b/src/core/NEON/kernels/convolution/winograd/winograd_fp16.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,11 +21,25 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "impl_qa8_qs8_per_channel.hpp" -namespace depthwise { -template class QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 1, 1>; -template class QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 2, 2>; -template class QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 1, 1>; -template class QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 2, 2>; -} // namespace depthwise +#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +#include "winograd_implementations.hpp" + +namespace arm_conv { +namespace winograd { + +template bool get_implementation<__fp16>( + WinogradImpl &, + const CPUInfo *, + const ConvolutionArgs &, + int max_threads, + bool fast_mode, + const WinogradConfig *, + const arm_gemm::GemmConfig * +); + +} // namespace winograd +} // namespace arm_conv + +#endif // defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_fp32.cpp index c13dd70a61..b92de1dde7 100644 --- a/src/core/NEON/kernels/convolution/depthwise/depthwise_fp32.cpp +++ b/src/core/NEON/kernels/convolution/winograd/winograd_fp32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,11 +21,21 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "impl_fp32_fp32.hpp" -namespace depthwise -{ -template class DepthwiseConvolution<4, 4, 3, 3, 2, 2, float, float, float>; -template class DepthwiseConvolution<4, 4, 5, 5, 1, 1, float, float, float>; -template class DepthwiseConvolution<3, 3, 5, 5, 2, 2, float, float, float>; -} // namespace depthwise +#include "winograd_implementations.hpp" + +namespace arm_conv { +namespace winograd { + +template bool get_implementation<float>( + WinogradImpl &, + const CPUInfo *, + const ConvolutionArgs &, + int max_threads, + bool fast_mode, + const WinogradConfig *, + const arm_gemm::GemmConfig * +); + +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp new file mode 100644 index 0000000000..af0dd04298 --- /dev/null +++ b/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once + +#include "winograd.hpp" +#include <memory> +#include <string> + +namespace arm_conv { +namespace winograd { + +enum class MethodConstraints +{ + None, + RequiresSVE = 0x1, + RequiresSVE2 = 0x2, + RequiresSME = 0x4, + RequiresSME2 = 0x8, + LargerShape = 0x10, // Input tensor shape is larger than the output transform tile shape. +}; + +constexpr inline bool operator!(const MethodConstraints &c) +{ + return c == MethodConstraints::None; +} + +constexpr inline MethodConstraints operator|(const MethodConstraints &a, const MethodConstraints &b) +{ + return static_cast<MethodConstraints>(static_cast<unsigned int>(a) | static_cast<unsigned int>(b)); +} + +constexpr inline MethodConstraints operator&(const MethodConstraints &a, const MethodConstraints &b) +{ + return static_cast<MethodConstraints>(static_cast<unsigned int>(a) & static_cast<unsigned int>(b)); +} + +inline bool constraints_met(const MethodConstraints &c, const CPUInfo *ci, const ConvolutionArgs &, const WinogradConfig *) +{ + return ( + (!(c & MethodConstraints::RequiresSVE) || (ci->has_sve())) && + (!(c & MethodConstraints::RequiresSVE2) || (ci->has_sve2())) && + (!(c & MethodConstraints::RequiresSME) || (ci->has_sme())) && + (!(c & MethodConstraints::RequiresSME2) || (ci->has_sme2())) + // Add further constraints here + ); +} + +inline bool output_transform_constraints_met(const output_transform::ITransform *transform, const MethodConstraints &c, const CPUInfo *ci, const ConvolutionArgs &conv_args, const WinogradConfig *cfg) +{ + return ( + constraints_met(c, ci, conv_args, cfg) && + (!(c & MethodConstraints::LargerShape) || (conv_args.input_shape.rows > transform->get_output_rows() && conv_args.input_shape.cols > 
transform->get_output_cols())) + ); +} + +namespace weight_transform { + +template <typename TIn, typename TOut=TIn> +struct TransformImplementation +{ + std::unique_ptr<const ITransform> transform; + MethodConstraints constraints; + + TransformImplementation(const ITransform *transform, const MethodConstraints &constraints = MethodConstraints::None) + : transform(transform), constraints(constraints) + { + } +}; + +template <typename TIn, typename TOut=TIn> +const TransformImplementation<TIn, TOut> *implementation_list(void); + +} // namespace weight_transform + +namespace input_transform +{ + +template <typename TIn, typename TOut=TIn> +struct TransformImplementation +{ + std::unique_ptr<const ITransform> transform; + MethodConstraints constraints; + + TransformImplementation(const ITransform *transform, const MethodConstraints &constraints = MethodConstraints::None) + : transform(transform), constraints(constraints) + { + } +}; + +template <typename TIn, typename TOut=TIn> +const TransformImplementation<TIn, TOut> *implementation_list(void); + +} // namespace input_transform + +namespace output_transform +{ + +template <typename TIn, typename TOut=TIn> +struct TransformImplementation +{ + std::unique_ptr<const ITransform> transform; + MethodConstraints constraints; + + TransformImplementation(const ITransform *transform, const MethodConstraints &constraints = MethodConstraints::None) + : transform(transform), constraints(constraints) + { + } +}; + +template <typename TIn, typename TOut=TIn> +const TransformImplementation<TIn, TOut> *implementation_list(void); + +} // namespace output_transform + +namespace{ + +template <typename T> +constexpr T iceildiv(T num, T den) +{ + return (num + den - 1) / den; +} + +template <typename T> +constexpr T iroundup(T num, T den) +{ + return den * iceildiv(num, den); +} + +} + +template <typename TWeight, typename TWinogradIn> +inline std::vector<const weight_transform::ITransform *> get_weight_transforms( + const CPUInfo *ci, 
const ConvolutionArgs &conv_args, const WinogradConfig *cfg +) +{ + // Get target inner tile size + const auto target_inner_tile_rows = cfg->output_rows == 0 ? 0 : (conv_args.kernel_shape.rows + cfg->output_rows - 1); + const auto target_inner_tile_cols = cfg->output_cols == 0 ? 0 : (conv_args.kernel_shape.cols + cfg->output_cols - 1); + + std::vector<const weight_transform::ITransform *> weight_transforms; + for (auto impl = weight_transform::implementation_list<TWeight, TWinogradIn>(); + impl->transform.get() != nullptr; impl++) + { + // If this transform supports the requested kernel size, then add it to the + // list of weight transforms. + if ( + constraints_met(impl->constraints, ci, conv_args, cfg) && + impl->transform->get_kernel_rows() == conv_args.kernel_shape.rows && + impl->transform->get_kernel_cols() == conv_args.kernel_shape.cols && + (target_inner_tile_rows == 0 || target_inner_tile_rows == impl->transform->get_transformed_tile_rows()) && + (target_inner_tile_cols == 0 || target_inner_tile_cols == impl->transform->get_transformed_tile_cols()) && + (cfg->weight_transform_filter == "" || std::strstr(impl->transform->get_name().c_str(), cfg->weight_transform_filter.c_str())) + ) + { + weight_transforms.push_back(impl->transform.get()); + } + } + + return weight_transforms; +} + +template <typename TIn, typename TWinogradIn> +inline std::vector<const input_transform::ITransform *> get_input_transforms( + const CPUInfo *ci, const ConvolutionArgs &conv_args, const WinogradConfig *cfg +) +{ + // Get target inner tile size + const auto target_inner_tile_rows = cfg->output_rows == 0 ? 0 : (conv_args.kernel_shape.rows + cfg->output_rows - 1); + const auto target_inner_tile_cols = cfg->output_cols == 0 ? 
0 : (conv_args.kernel_shape.cols + cfg->output_cols - 1); + + std::vector<const input_transform::ITransform *> input_transforms; + for (auto impl = input_transform::implementation_list<TIn, TWinogradIn>(); + impl->transform.get() != nullptr; impl++) + { + if( + constraints_met(impl->constraints, ci, conv_args, cfg) && + (target_inner_tile_rows == 0 || target_inner_tile_rows == impl->transform->get_input_rows()) && + (target_inner_tile_cols == 0 || target_inner_tile_cols == impl->transform->get_input_cols()) && + (cfg->input_transform_filter == "" || std::strstr(impl->transform->get_name().c_str(), cfg->input_transform_filter.c_str())) + ) + { + input_transforms.push_back(impl->transform.get()); + } + } + + return input_transforms; +} + +template <typename TWinogradOut, typename TOut> +inline std::vector<const output_transform::ITransform *> get_output_transforms( + const CPUInfo *ci, const ConvolutionArgs &conv_args, const WinogradConfig *cfg +) +{ + std::vector<const output_transform::ITransform *> output_transforms; + for (auto impl = output_transform::implementation_list<TWinogradOut, TOut>(); + impl->transform.get() != nullptr; impl++) + { + if( + output_transform_constraints_met(impl->transform.get(), impl->constraints, ci, conv_args, cfg) && + impl->transform->get_kernel_rows() == conv_args.kernel_shape.rows && + impl->transform->get_kernel_cols() == conv_args.kernel_shape.cols && + (cfg->output_rows == 0 || cfg->output_rows == impl->transform->get_output_rows()) && + (cfg->output_cols == 0 || cfg->output_cols == impl->transform->get_output_cols()) && + (cfg->output_transform_filter == "" || std::strstr(impl->transform->get_name().c_str(), cfg->output_transform_filter.c_str())) + ) + { + output_transforms.push_back(impl->transform.get()); + } + } + + return output_transforms; +} + +template <typename TIn, typename TWeight, typename TOut, typename TWinogradIn, typename TWinogradOut> +bool get_implementation( + WinogradImpl &dest, // Destination for the 
selected implementation + const CPUInfo *ci, + const ConvolutionArgs &conv_args, + int max_threads, + bool fast_mode, + const WinogradConfig *cfg, + const arm_gemm::GemmConfig *gemm_cfg +) +{ + // Get vectors of valid weight, input and output transforms; then select the + // combination which produces the biggest output tile. + const auto weight_transforms = get_weight_transforms<TWeight, TWinogradIn>(ci, conv_args, cfg); + const auto input_transforms = get_input_transforms<TIn, TWinogradIn>(ci, conv_args, cfg); + const auto output_transforms = get_output_transforms<TWinogradOut, TOut>(ci, conv_args, cfg); + + // Now attempt to select a complete set of Winograd transformations which can + // solve the problem. Work backwards from the output transform to find + // matching input implementations. + bool success = false; + for (auto output_transform = output_transforms.cbegin(); + !success && output_transform != output_transforms.cend(); + output_transform++) + { + // Look for matching weight transforms, if we find one then we look for + // matching input transforms. + for (auto weight_transform = weight_transforms.cbegin(); + !success && weight_transform != weight_transforms.cend(); + weight_transform++) + { + // If this weight transform is compatible, then look for a matching input + // transform + if ((*output_transform)->get_input_rows() == (*weight_transform)->get_transformed_tile_rows() && + (*output_transform)->get_input_cols() == (*weight_transform)->get_transformed_tile_cols()) + { + for (auto input_transform = input_transforms.cbegin(); + !success && input_transform != input_transforms.cend(); + input_transform++) + { + // If the input transform is suitable, then set the configuration and + // indicate success. 
+ if ((*input_transform)->get_input_rows() == (*output_transform)->get_input_rows() && + (*input_transform)->get_input_cols() == (*output_transform)->get_input_cols()) + { + dest.output_transform = *output_transform; + dest.input_transform = *input_transform; + dest.weight_transform = *weight_transform; + success = true; + } + } + } + } + } + + if (!success) + { + return false; + } + + // If we're able to construct the Winograd elements, then specify the GEMM + // arguments required to perform the multiply-accumulate step of the + // convolution. + const auto n_output_row_tiles = iceildiv(conv_args.output_shape.rows, dest.output_transform->get_output_rows()); + const auto n_output_col_tiles = iceildiv(conv_args.output_shape.cols, dest.output_transform->get_output_cols()); + const auto n_output_patches = n_output_row_tiles * n_output_col_tiles; + + const int n_multis = dest.input_transform->get_input_rows() * + dest.input_transform->get_input_cols(); + + dest.gemm_args.reset(new arm_gemm::GemmArgs( + ci, + n_output_patches, // M + conv_args.n_output_channels, // N + conv_args.n_input_channels, // K + 1, // K-sections + conv_args.n_batches, // # Batches + n_multis, + false, // Indirect input + {}, // No activation + max_threads, + false, // Not fixed format + fast_mode, + gemm_cfg + )); + + // Also provide hints for the Winograd memory layout + auto &ws = dest.winograd_spec; + ws.weight_ld_row = iroundup(conv_args.n_output_channels, 4u); + ws.weight_ld_matrix = conv_args.n_input_channels * ws.weight_ld_row; + ws.weight_matrix_size_bytes = n_multis * ws.weight_ld_matrix * sizeof(TWinogradIn); + + ws.input_ld_row = iroundup(conv_args.n_input_channels, 4u); + ws.input_ld_matrix = iroundup(n_output_patches, 4u) * ws.input_ld_row; + ws.input_ld_batch = n_multis * ws.input_ld_matrix; + ws.input_matrix_size_bytes = conv_args.n_batches * ws.input_ld_batch * sizeof(TWinogradIn); + + ws.output_ld_row = ws.weight_ld_row; + ws.output_ld_matrix = n_output_patches * 
ws.output_ld_row; + ws.output_ld_batch = n_multis * ws.output_ld_matrix; + ws.output_matrix_size_bytes = conv_args.n_batches * ws.output_ld_batch * sizeof(TWinogradOut); + + return true; +} + +} // namespace winograd +} // namespace arm_conv diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp deleted file mode 100644 index 52ff7b3798..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once -#include "arm_gemm_local.hpp" -#include "arm_gemm.hpp" -#include "winograd.hpp" - -namespace winograd -{ - - -class IWinogradConvolutionLayer -{ - public: - virtual ~IWinogradConvolutionLayer() = default; - - virtual unsigned int weight_transform_get_window(void) const = 0; - virtual void weight_transform_run(unsigned int start, unsigned int stop) = 0; - - virtual IInputTransform& input_transform(void) = 0; // Expose the input transform - virtual IOutputTransform& output_transform(void) = 0; // Expose the output transform - virtual arm_gemm::IGemmCommon *gemm(void) = 0; // Expose the underlying GEMM -}; - -/** Example of how to construct an ACL-like interface. - * - * Use `get_weight_storage_size`, `get_input_storage_size` and - * `get_output_storage_size` to allocate memory for the convolution engine. - * Then create a `WinogradConvolutionLayer`. - * - * Initialise the weights using `weights_transform.run(...)`. - * - * For each inference: - * 1. Transform the inputs to the Winograd domain using `input_transform.run(...)` - * 2. Perform a number of GEMMs using `gemms.run(...)` - * 3. 
Transform the output to the spatial domain using `output_transform.run(...)` - */ -template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols, - typename TIn, typename TInGEMM, typename TOutGEMM, typename TOut, - WinogradRoots Roots> -class WinogradConvolutionLayer : public IWinogradConvolutionLayer -{ - public: - using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols, Roots>; - using WeightsTransform = typename WinogradBase::template WeightsTransform<TIn, TInGEMM>; - using InputTransform = typename WinogradBase::template InputTransform<TIn, TInGEMM>; - using WinogradConv = typename WinogradBase::template Convolution<TOut, TIn, TInGEMM, TOutGEMM>; - using OutputTransform = typename WinogradBase::template OutputTransform<TOutGEMM, TOut>; - - private: - static constexpr int InnerTileRows = OutputTileRows + KernelRows - 1; - static constexpr int InnerTileCols = OutputTileCols + KernelCols - 1; - static constexpr int N_GEMMS = InnerTileRows * InnerTileCols; - - const int _n_output_rows, _n_output_cols; - const int _kernel_matrix_stride, _kernel_matrix_row_stride; - const int _input_matrix_stride, _input_matrix_row_stride; - const int _output_matrix_stride, _output_matrix_row_stride; - const int _tile_rows, _tile_cols; - const int _m, _k, _n; - - WeightsTransform weights_transform; /** Operator to transform weights to Winograd domain. */ - InputTransform _input_transform; /** Operator to transform input to Winograd domain. */ - const arm_gemm::GemmArgs gemm_args; - arm_gemm::UniqueGemmCommon<TInGEMM, TOutGEMM> gemms; /** Operator to perform multiple GEMMs. */ - OutputTransform _output_transform; /** Operator to transform output from Winograd domain. */ - - public: - - /** Determine how much memory (in units of TIn) to allocate for the - * transformed weights. - */ - static unsigned int get_weight_storage_size( - const int n_output_channels, /** Number of output feature maps. 
*/ - const int n_input_channels /** Number of input feature maps. */ - ); - - static unsigned int get_weight_stride( - const int n_output_channels, /** Number of output feature maps. */ - const int n_input_channels /** Number of input feature maps. */ - ); - - static unsigned int get_weight_multi_stride( - const int n_output_channels, /** Number of output feature maps. */ - const int n_input_channels /** Number of input feature maps. */ - ); - - /** Determine how much memory (in units of TIn) to allocate for the - * transformed input. - */ - static unsigned int get_input_storage_size( - const int n_batches, /** Number of batches in the input tensor. */ - const int n_channels, /** Number of feature maps in the input tensor. */ - const int n_rows, /** Number of rows in each feature map. */ - const int n_cols, /** Number of columns in each feature map. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - /** Get the row stride for the A matrix in the Winograd domain. */ - static unsigned int get_input_stride( - const int n_batches, /** Number of batches in the input tensor. */ - const int n_channels, /** Number of feature maps in the input tensor. */ - const int n_rows, /** Number of rows in each feature map. */ - const int n_cols, /** Number of columns in each feature map. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - /** Get the stride between A matrices in the Winograd domain. */ - static unsigned int get_input_multi_stride( - const int n_batches, /** Number of batches in the input tensor. */ - const int n_channels, /** Number of feature maps in the input tensor. */ - const int n_rows, /** Number of rows in each feature map. */ - const int n_cols, /** Number of columns in each feature map. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - /** Determine how much memory (in units of TOut) to allocate for the - * (Winograd domain) output. 
- */ - static unsigned int get_output_storage_size( - const int n_batches, /** Number of batches in the output tensor. */ - const int n_rows, /** Number of rows in each feature map of the input tensor. */ - const int n_cols, /** Number of columns in each feature map of the input tensor. */ - const int n_output_channels, /** Number of feature maps in the output tensor. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - static unsigned int get_output_stride( - const int n_batches, /** Number of batches in the output tensor. */ - const int n_rows, /** Number of rows in each feature map of the input tensor. */ - const int n_cols, /** Number of columns in each feature map of the input tensor. */ - const int n_output_channels, /** Number of feature maps in the output tensor. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - static unsigned int get_output_multi_stride( - const int n_batches, /** Number of batches in the output tensor. */ - const int n_rows, /** Number of rows in each feature map of the input tensor. */ - const int n_cols, /** Number of columns in each feature map of the input tensor. */ - const int n_output_channels, /** Number of feature maps in the output tensor. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - /** Get the shape (rows, cols) of a feature map of the output tensor. */ - static std::pair<int, int> get_output_feature_map_shape( - const int n_input_rows, /** Number of rows in the input feature map. */ - const int n_input_cols, /** Number of columns in the input feature map. */ - const bool same_padding /** Use "SAME" padding, otherwise use "VALID". */ - ); - - /** Create a new Winograd convolution layer. - */ - WinogradConvolutionLayer( - const CPUInfo &cpuinfo, /** Describes CPU properties. */ - const int n_threads, /** Maximum number of threads used to execute the convolution. 
*/ - const int n_batches, /** Number of batches in the input and output tensors. */ - const int n_input_channels, /** Number of feature maps in a batch of the input tensor. */ - const int n_input_rows, /** Number of rows in a feature map of the input tensor. */ - const int n_input_cols, /** Number of columns in a feature map of the input tensor. */ - const int n_output_channels, /** Number of feature maps in the output tensor. */ - const bool same_padding, /** Use "SAME" padding, otherwise use "VALID". */ - const arm_gemm::Activation &activation, - const TIn* const weights, /** Pointer to weight tensor in spatial domain. Must be ordered as "Height x Rows x Input Feature Maps x Output Feature Maps. */ - TInGEMM* const weights_storage, /** Pointer to storage for weight tensor in the Winograd domain. Must be at least the size returned by `get_weight_storage_size`. */ - const TIn* const input, /** Pointer to NHWC ordered input tensor, in the spatial domain. */ - TInGEMM* const winograd_input, /** Pointer to working space for the input tensor in the Winograd domain. Must be at least the size returned by `get_input_storage_size`. */ - const TOut* const biases, /** Pointer to biases vector. Pass nullptr if no bias is provided. */ - TOut* const output, /** Pointer to NHWC ordered output tensor, in the spatial domain. */ - TOutGEMM* const winograd_output, /** Pointer to working space for the output tensor in the Winograd domain. Must be at least the size returned by `get_output_storage_size`. */ - const bool pretranspose_B=true, /** Hint that the B matrix can be pretransposed. */ - arm_gemm::GemmConfig *gemm_cfg=nullptr /** Pointer to GEMM configuration. */ - ); - - /* Utility methods for interacting with the layer. 
*/ - unsigned int weight_transform_get_window(void) const; - void weight_transform_run(const unsigned int start, const unsigned int stop); - - IInputTransform& input_transform(void); - IOutputTransform& output_transform(void); - - /* Get a pointer to the GEMM underlying the Winograd transform. */ - arm_gemm::IGemmCommon *gemm(void); -}; - -} diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp deleted file mode 100644 index c0f50beb2c..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input.hpp +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include <algorithm> - -#include "padding.hpp" -#include "utils.hpp" -#include "winograd.hpp" - -#define MEMBERFN(RTYPE) template <\ - int InnerTileRows, int InnerTileCols,\ - typename TIn, typename TOut, WinogradRoots Roots\ -> RTYPE InputTransform<InnerTileRows, InnerTileCols, TIn, TOut, Roots> - - -#define Nx1MEMBERFN(RTYPE) template <\ - int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\ -> RTYPE InputTransform<InnerTileRows, 1, TIn, TOut, Roots> - -namespace winograd -{ - -MEMBERFN()::InputTransform( - const int kernel_rows, - const int kernel_cols, - const int n_batches, - const int n_rows, - const int n_cols, - const int n_channels, - const int padding_top, - const int padding_left, - const int padding_bottom, - const int padding_right -) : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), _n_channels(n_channels), - _inptr(nullptr), _outptr(nullptr), - _overlap_rows(kernel_rows - 1), _overlap_cols(kernel_cols - 1), - _padding_top(padding_top), _padding_left(padding_left), _padding_bottom(padding_bottom), _padding_right(padding_right), - _tiles_M(iceildiv(padding_top + n_rows + padding_bottom - kernel_rows + 1, InnerTileRows - kernel_rows + 1)), - _tiles_N(iceildiv(padding_left + n_cols + padding_right - kernel_cols + 1, InnerTileCols - kernel_cols + 1)), - _matrix_stride(0), _matrix_row_stride(0), _matrix_batch_stride(0), - _in_col_stride(0), _in_row_stride(0), _in_batch_stride(0), - _working_space_col_stride(n_channels), - _working_space_row_stride(InnerTileCols * _working_space_col_stride), - _working_space(nullptr) -{ -} - -MEMBERFN(void)::set_input_tensor(const void* const inptr) -{ - set_input_tensor(inptr, _n_channels); -} - -MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol) -{ - set_input_tensor(inptr, _n_cols * ldcol, ldcol); -} - -MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol) -{ - set_input_tensor(inptr, _n_rows * ldrow, ldrow, 
ldcol); -} - -MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol) -{ - _inptr = static_cast<const TIn *>(inptr); - _in_batch_stride = ldbatch; - _in_row_stride = ldrow; - _in_col_stride = ldcol; -} - -MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow) -{ - _outptr = static_cast<TOut *>(mptr); - _matrix_stride = ldmatrix; - _matrix_row_stride = ldrow; - _matrix_batch_stride = _tiles_M * _tiles_N * ldrow; -} - -Nx1MEMBERFN()::InputTransform( - const int kernel_rows, - const int kernel_cols, - const int n_batches, - const int n_rows, - const int n_cols, - const int n_channels, - const int padding_top, - const int padding_left, - const int padding_bottom, - const int padding_right -) : InputTransform<1, InnerTileRows, TIn, TOut, Roots>::InputTransform( - /* Transpose rows and columns */ - kernel_cols, kernel_rows, n_batches, n_cols, n_rows, n_channels, - padding_left, padding_top, padding_right, padding_bottom - ) -{ -} - -Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr) -{ - set_input_tensor(inptr, this->_n_channels); -} - -Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldcol) -{ - set_input_tensor(inptr, this->_n_cols * ldcol, ldcol); -} - -Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldrow, const int ldcol) -{ - set_input_tensor(inptr, this->_n_rows * ldrow, ldrow, ldcol); -} - -Nx1MEMBERFN(void)::set_input_tensor(const void* const inptr, const int ldbatch, const int ldrow, const int ldcol) -{ - // Transpose row and column strides - Base::set_input_tensor(inptr, ldbatch, ldcol, ldrow); -} - -MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const -{ - return sizeof(TIn) * InnerTileRows * _working_space_row_stride * nthreads; -} - -MEMBERFN(void)::set_working_space(void * const buffer) -{ - _working_space = static_cast<TIn *>(buffer); -} - -MEMBERFN(unsigned int)::get_window(void) const 
-{ - return iceildiv(_n_channels, WINDOW_BLOCK); -} - -MEMBERFN(void)::run( - const unsigned int start, - const unsigned int stop, - const unsigned int threadid -) -{ - // Determine the channels on which to work - if (start >= get_window()) - { - return; // No work to do beyond the end of the window - } - const unsigned int start_channel = start * WINDOW_BLOCK; - const unsigned int stop_channel = std::min<unsigned int>(_n_channels , stop * WINDOW_BLOCK); - const unsigned int n_channels = stop_channel - start_channel; - - // Loop over batches - for (int batch = 0; batch < _n_batches; batch++) - { - const TIn* const inptr_batch = _inptr + start_channel + batch*_in_batch_stride; - TOut* const outptr_batch = _outptr + start_channel + batch*_matrix_batch_stride; - - // Loop over rows of tiles - for (int tile_i = 0; tile_i < _tiles_M; tile_i++) - { - // Compute the starting and ending row of pixels within the row of tiles, - // hence compute the padding to apply to the top and bottom of each tile. - const int row_top = tile_i * (InnerTileRows - _overlap_rows) - _padding_top; - const int row_bottom = row_top + InnerTileRows; - const int row_pad_top = std::max(0, _padding_top - tile_i * (InnerTileRows - _overlap_rows)); - const int row_pad_bottom = std::max(0, row_bottom - _n_rows); - - // Get a pointer to the start of the row. - const int row_offset = std::min(0, row_pad_top - _padding_top); - const TIn* const inptr_row = inptr_batch + _in_row_stride*(row_offset + tile_i*(InnerTileRows - _overlap_rows)); - TOut* const outptr_row = outptr_batch + tile_i*_tiles_N*_matrix_row_stride; - - // Loop over tiles within the row - for (int tile_j = 0; tile_j < _tiles_N; tile_j++) - { - // Compute the starting and ending column of pixels within the tile, - // hence compute the padding to apply to the left and right of the - // tile. 
- const int tile_left = tile_j * (InnerTileCols - _overlap_cols) - _padding_left; - const int tile_right = tile_left + InnerTileCols; - const int tile_pad_left = std::max(0, _padding_left - tile_j * (InnerTileCols - _overlap_cols)); - const int tile_pad_right = std::max(0, tile_right - _n_cols); - - // Get a pointer to the start of the tile. - const int col_offset = std::min(0, tile_pad_left - _padding_left); - const TIn* const inptr_tile = inptr_row + _in_col_stride*(col_offset + tile_j*(InnerTileCols - _overlap_cols)); - TOut* const outptr_tile = outptr_row + tile_j * _matrix_row_stride; - - // Transform the tile, applying padding if necessary. - if (row_pad_top || tile_pad_left || row_pad_bottom || tile_pad_right) - { - transform_padded_tile( - threadid, n_channels, outptr_tile, inptr_tile, - row_pad_top, tile_pad_left, row_pad_bottom, tile_pad_right - ); - } - else - { - transform_unpadded_tile(threadid, n_channels, outptr_tile, inptr_tile); - } - } - } - } -} - -MEMBERFN(void)::transform_unpadded_tile( - const unsigned int /* threadid unused */, - const int n_channels, - TOut * const outptr, - const TIn * const inptr -) -{ - transform_tile( - n_channels, inptr, _in_row_stride, _in_col_stride, outptr, _matrix_stride - ); -} - -MEMBERFN(void)::transform_padded_tile( - const unsigned int threadid, - const int n_channels, - TOut * const outptr, - const TIn * const inptr, - const int padding_top, - const int padding_left, - const int padding_bottom, - const int padding_right -) -{ - padding::copy_and_pad_tile( - InnerTileRows, InnerTileCols, n_channels, - inptr, _in_row_stride, _in_col_stride, - static_cast<TIn *>(get_working_space(threadid)), _working_space_row_stride, _working_space_col_stride, - padding_top, padding_left, padding_bottom, padding_right - ); - - transform_tile( - n_channels, static_cast<const TIn *>(get_working_space(threadid)), - _working_space_row_stride, _working_space_col_stride, - outptr, _matrix_stride - ); -} - -MEMBERFN(void 
*)::get_working_space(const unsigned int threadid) const -{ - return _working_space + InnerTileRows * _working_space_row_stride * threadid; -} - -} // namespace winograd diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp deleted file mode 100644 index 5e6ac97121..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/input_4x4_fp16_fp16_integers.cpp +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (c) 2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - -#include "input.hpp" -#include "arm.hpp" - -namespace winograd -{ - -template <> -void InputTransform<4, 4, __fp16, __fp16, WinogradRoots::Integers>::transform_tile( - const int n_channels, - const __fp16* const input_base, - const int input_row_stride, - const int input_col_stride, - __fp16* outptr, - const int matrix_stride -) -{ - constexpr int inner_tile_rows = 4, inner_tile_cols = 4; - - // Get pointers into the input tile - const __fp16 *x_ptrs[inner_tile_rows][inner_tile_cols]; - for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++) - { - // Get a pointer into the row - const __fp16* const row_ptr = input_base + xi*input_row_stride; - - for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++) - { - x_ptrs[i][j] = row_ptr + xj*input_col_stride; - } - } - - // Matrices used/computed in this kernel. - __fp16 x[inner_tile_rows][inner_tile_cols]; - __fp16 XTx[inner_tile_rows][inner_tile_cols]; - __fp16 U[inner_tile_rows][inner_tile_cols]; - - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = XTx[i][j] = 0.0f; - } - } - - // Perform the Winograd input transformation for each channel in the input - // tensor. - int channels_remaining = n_channels; -#ifdef __aarch64__ - for (; channels_remaining >= 8; channels_remaining -= 8) - { - // Matrices used/computed in this kernel. - float16x8_t x[inner_tile_rows][inner_tile_cols]; - float16x8_t XTx[inner_tile_rows][inner_tile_cols]; - float16x8_t U[inner_tile_rows][inner_tile_cols]; - - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = vdupq_n_f16(0.0f); - XTx[i][j] = vdupq_n_f16(0.0f); - } - } - - // Load x - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = vld1q_f16(x_ptrs[i][j]); - x_ptrs[i][j] += 8; - } - } - - // Compute XT . 
x - for (int j = 0; j < inner_tile_cols; j++) - { - // XTx[0][j] = x[0][j] - x[2][j]; - XTx[0][j] = vsubq_f16(x[0][j], x[2][j]); - - // XTx[1][j] = x[1][j] + x[2][j]; - XTx[1][j] = vaddq_f16(x[1][j], x[2][j]); - - // XTx[2][j] = x[2][j] - x[1][j]; - XTx[2][j] = vsubq_f16(x[2][j], x[1][j]); - - // XTx[3][j] = x[1][j] - x[3][j]; - XTx[3][j] = vsubq_f16(x[1][j], x[3][j]); - } - - // Compute U = XT . x . X - for (int i = 0; i < inner_tile_rows; i++) - { - // U[i][0] = XTx[i][0] - XTx[i][2]; - U[i][0] = vsubq_f16(XTx[i][0], XTx[i][2]); - - // U[i][1] = XTx[i][1] + XTx[i][2]; - U[i][1] = vaddq_f16(XTx[i][1], XTx[i][2]); - - // U[i][2] = XTx[i][2] - XTx[i][1]; - U[i][2] = vsubq_f16(XTx[i][2], XTx[i][1]); - - // U[i][3] = XTx[i][1] - XTx[i][3]; - U[i][3] = vsubq_f16(XTx[i][1], XTx[i][3]); - } - - // Store the transformed matrix - for (int i = 0, m = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++, m++) - { - vst1q_f16(outptr + m*matrix_stride, U[i][j]); - } - } - outptr += 8; - } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 4; channels_remaining -= 4) - { - // Matrices used/computed in this kernel. - float16x4_t x[inner_tile_rows][inner_tile_cols]; - float16x4_t XTx[inner_tile_rows][inner_tile_cols]; - float16x4_t U[inner_tile_rows][inner_tile_cols]; - - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = vdup_n_f16(0.0f); - XTx[i][j] = vdup_n_f16(0.0f); - } - } - - // Load x - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = vld1_f16(x_ptrs[i][j]); - x_ptrs[i][j] += 4; - } - } - - // Compute XT . 
x - for (int j = 0; j < inner_tile_cols; j++) - { - // XTx[0][j] = x[0][j] - x[2][j]; - XTx[0][j] = vsub_f16(x[0][j], x[2][j]); - - // XTx[1][j] = x[1][j] + x[2][j]; - XTx[1][j] = vadd_f16(x[1][j], x[2][j]); - - // XTx[2][j] = x[2][j] - x[1][j]; - XTx[2][j] = vsub_f16(x[2][j], x[1][j]); - - // XTx[3][j] = x[1][j] - x[3][j]; - XTx[3][j] = vsub_f16(x[1][j], x[3][j]); - } - - // Compute U = XT . x . X - for (int i = 0; i < inner_tile_rows; i++) - { - // U[i][0] = XTx[i][0] - XTx[i][2]; - U[i][0] = vsub_f16(XTx[i][0], XTx[i][2]); - - // U[i][1] = XTx[i][1] + XTx[i][2]; - U[i][1] = vadd_f16(XTx[i][1], XTx[i][2]); - - // U[i][2] = XTx[i][2] - XTx[i][1]; - U[i][2] = vsub_f16(XTx[i][2], XTx[i][1]); - - // U[i][3] = XTx[i][1] - XTx[i][3]; - U[i][3] = vsub_f16(XTx[i][1], XTx[i][3]); - } - - // Store the transformed matrix - for (int i = 0, m = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++, m++) - { - vst1_f16(outptr + m*matrix_stride, U[i][j]); - } - } - outptr += 4; - } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) - { - // Load x - for (int i = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++) - { - x[i][j] = *(x_ptrs[i][j]++); - } - } - - // Compute XT . x - for (int j = 0; j < inner_tile_cols; j++) - { - XTx[0][j] = x[0][j] - x[2][j]; - XTx[1][j] = x[1][j] + x[2][j]; - XTx[2][j] = x[2][j] - x[1][j]; - XTx[3][j] = x[1][j] - x[3][j]; - } - - // Compute U = XT . x . 
X - for (int i = 0; i < inner_tile_rows; i++) - { - U[i][0] = XTx[i][0] - XTx[i][2]; - U[i][1] = XTx[i][1] + XTx[i][2]; - U[i][2] = XTx[i][2] - XTx[i][1]; - U[i][3] = XTx[i][1] - XTx[i][3]; - } - - // Store the transformed matrix - for (int i = 0, m = 0; i < inner_tile_rows; i++) - { - for (int j = 0; j < inner_tile_cols; j++, m++) - { - *(outptr + m*matrix_stride) = U[i][j]; - } - } - outptr++; - } -} - -template class InputTransform<4, 4, __fp16, __fp16, WinogradRoots::Integers>; - -} // namespace -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp deleted file mode 100644 index 27d20811d6..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/kernel.hpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once -#include "winograd.hpp" -using namespace winograd; - -#define MEMBERFN(RTYPE) template <\ - int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename TIn, typename TOut, WinogradRoots Roots\ -> RTYPE WeightTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots> - -MEMBERFN()::WeightTransform( - const int n_output_channels, - const int n_input_channels -) : _n_output_channels(n_output_channels), _n_input_channels(n_input_channels), - _matrices(nullptr), _matrix_stride(0), _matrix_row_stride(0), _weights(nullptr) -{ - -} - -MEMBERFN(void)::set_weight_tensor(const void * const weights) -{ - _weights = static_cast<const TIn *>(weights); -} - -MEMBERFN(void)::set_output_matrices(void * const mptr, const int ldmatrix, const int ldrow) -{ - _matrices = static_cast<TOut *>(mptr); - _matrix_stride = ldmatrix; - _matrix_row_stride = ldrow; -} - -MEMBERFN(size_t)::get_working_space_size(unsigned int) const -{ - return 0; -} - -MEMBERFN(void)::set_working_space(void *) -{ -} - -MEMBERFN(unsigned int)::get_window(void) const -{ - // TODO When the weights transform supports multithreading, return the number - // of output channels. For now we return 1 to indicate that the weights must - // be transformed as a single block. 
- // return n_output_channels; - return 1; -} - -MEMBERFN(void)::run(const unsigned int, const unsigned int, unsigned int) -{ - execute( - _n_output_channels, _n_input_channels, _weights, - _matrices, _matrix_stride, _matrix_row_stride - ); -} diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp deleted file mode 100644 index c1fb559b1d..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/output.hpp +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include <algorithm> -#include "winograd.hpp" -#include "padding.hpp" -#include "utils.hpp" - -#define MEMBERFN(RTYPE) template<\ - int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols,\ - typename TIn, typename TOut, WinogradRoots Roots\ -> RTYPE OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, TIn, TOut, Roots> - -#define Nx1MEMBERFN(RTYPE) template<\ - int KernelRows, int InnerTileRows, typename TIn, typename TOut, WinogradRoots Roots\ -> RTYPE OutputTransform<KernelRows, 1, InnerTileRows, 1, TIn, TOut, Roots> - -namespace winograd -{ - -MEMBERFN() -::OutputTransform(const int n_batches, const int n_rows, const int n_cols, - const int n_channels, const arm_gemm::Activation &activation) - : _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols), - _n_channels(n_channels), - _output_min((activation.type == arm_gemm::Activation::Type::ReLU || - activation.type == arm_gemm::Activation::Type::BoundedReLU) - ? static_cast<TOut>(0.0f) : TypeBounds<TOut>::lower()), - _output_max((activation.type == arm_gemm::Activation::Type::BoundedReLU) - ? 
static_cast<TOut>(activation.param1) : TypeBounds<TOut>::upper()), - _matrix_base(nullptr), _biases(nullptr), _matrix_stride(0), - _matrix_row_stride(0), _matrix_batch_stride(0), _outptr(nullptr), - _tiles_M(iceildiv(n_rows, output_tile_rows)), - _tiles_N(iceildiv(n_cols, output_tile_cols)), _out_col_stride(0), - _out_row_stride(0), _out_batch_stride(0), - _working_space_col_stride(n_channels), - _working_space_row_stride(output_tile_cols * _working_space_col_stride), - _working_space(nullptr) {} - -MEMBERFN(void)::set_input_matrices(const void * const mptr, const int ldmatrix, const int ldrow) -{ - _matrix_base = static_cast<const TIn *>(mptr); - _matrix_stride = ldmatrix; - _matrix_row_stride = ldrow; - _matrix_batch_stride = _tiles_M * _tiles_N * ldrow; -} - -MEMBERFN(void)::set_bias(const void * const bias) -{ - _biases = static_cast<const TOut *>(bias); -} - -MEMBERFN(void)::set_output_tensor(void * const outptr) -{ - set_output_tensor(outptr, _n_channels); -} - -MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol) -{ - set_output_tensor(outptr, _n_cols * ldcol, ldcol); -} - -MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol) -{ - set_output_tensor(outptr, _n_rows * ldrow, ldrow, ldcol); -} - -MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol) -{ - _outptr = static_cast<TOut *>(outptr); - _out_batch_stride = ldbatch; - _out_row_stride = ldrow; - _out_col_stride = ldcol; -} - -Nx1MEMBERFN()::OutputTransform( - const int n_batches, - const int n_rows, - const int n_cols, - const int n_channels, - const arm_gemm::Activation &activation -) : OutputTransform<1, KernelRows, 1, InnerTileRows, TIn, TOut, Roots>::OutputTransform( - n_batches, n_cols, n_rows, n_channels, activation /* Transpose rows and columns */ - ) -{ -} - -Nx1MEMBERFN(void)::set_output_tensor(void * const outptr) -{ - set_output_tensor(outptr, this->_n_channels); -} - 
-Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldcol) -{ - set_output_tensor(outptr, this->_n_cols * ldcol, ldcol); -} - -Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldrow, const int ldcol) -{ - set_output_tensor(outptr, this->_n_rows * ldrow, ldrow, ldcol); -} - -Nx1MEMBERFN(void)::set_output_tensor(void * const outptr, const int ldbatch, const int ldrow, const int ldcol) -{ - // Transpose rows and columns - Base::set_output_tensor(outptr, ldbatch, ldcol, ldrow); -} - -MEMBERFN(size_t)::get_working_space_size(const unsigned int nthreads) const -{ - return sizeof(TOut) * output_tile_rows * _working_space_row_stride * nthreads; -} - -MEMBERFN(void)::set_working_space(void * const buffer) -{ - _working_space = static_cast<TOut *>(buffer); -} - -MEMBERFN(unsigned int)::get_window(void) const -{ - return iceildiv(_n_channels, WINDOW_BLOCK); -} - -MEMBERFN(void)::run( - const unsigned int start, - const unsigned int stop, - const unsigned int threadid -) -{ - // Determine the channels on which to work - if (start >= get_window()) - { - return; // No work to do beyond the end of the window - } - const unsigned int start_channel = start * WINDOW_BLOCK; - const unsigned int stop_channel = std::min<unsigned int>(_n_channels, stop * WINDOW_BLOCK); - const unsigned int n_channels = stop_channel - start_channel; - - const auto matrix_tile_col_stride = _matrix_row_stride; - const auto matrix_tile_row_stride = _tiles_N * matrix_tile_col_stride; - - const TOut* const bptr = (_biases == nullptr) ? 
nullptr : _biases + start_channel; - - // Loop over batches - for (int batch = 0; batch < _n_batches; batch++) - { - const TIn* const matrix_batch = _matrix_base + start_channel + batch * _matrix_batch_stride; - TOut* const outptr_batch = _outptr + start_channel + batch * _out_batch_stride; - - for (int tile_i = 0; tile_i < _tiles_M; tile_i++) - { - // Compute properties of the row of output tiles - const int row_pad_bottom = std::max(0, (tile_i + 1)*output_tile_rows - _n_rows); - const TIn* const matrix_tile_row = matrix_batch + tile_i * matrix_tile_row_stride; - TOut* const outptr_row = outptr_batch + tile_i * output_tile_rows * _out_row_stride; - - for (int tile_j = 0; tile_j < _tiles_N; tile_j++) - { - // Compute property of this specific tile - const int tile_pad_right = std::max(0, (tile_j + 1)*output_tile_cols - _n_cols); - const TIn* const matrix_tile = matrix_tile_row + tile_j * matrix_tile_col_stride; - TOut* const outptr_tile = outptr_row + tile_j * output_tile_cols * _out_col_stride; - - // Perform the transformation - if (row_pad_bottom || tile_pad_right) - { - transform_cropped_tile( - threadid, n_channels, outptr_tile, matrix_tile, bptr, - row_pad_bottom, tile_pad_right - ); - } - else - { - transform_uncropped_tile( - threadid, n_channels, outptr_tile, matrix_tile, bptr - ); - } - } - } - } -} - -MEMBERFN(void)::transform_uncropped_tile( - const unsigned int /* threadid unused */, - const int n_channels, - TOut * const outptr, - const TIn * const inptr, - const TOut * const biases -) -{ - transform_tile( - n_channels, inptr, _matrix_stride, biases, - outptr, _out_row_stride, _out_col_stride, - _output_min, _output_max - ); -} - -MEMBERFN(void)::transform_cropped_tile( - const unsigned int threadid, - const int n_channels, - TOut * const outptr, - const TIn * const inptr, - const TOut * const biases, - const int pad_bottom, - const int pad_right -) -{ - // Transform into working space and then copy the relevant section out. 
- TOut *wsptr = static_cast<TOut *>(get_working_space(threadid)); - transform_tile( - n_channels, inptr, _matrix_stride, biases, - wsptr, _working_space_row_stride, _working_space_col_stride, - _output_min, _output_max - ); - - padding::crop_and_copy_tile( - output_tile_rows, output_tile_cols, n_channels, - wsptr, _working_space_row_stride, _working_space_col_stride, - outptr, _out_row_stride, _out_col_stride, - 0u, 0u, pad_bottom, pad_right - ); -} - -MEMBERFN(void *)::get_working_space(const unsigned int threadid) const -{ - return _working_space + output_tile_rows * _working_space_row_stride * threadid; -} - -} // namespace winograd diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp deleted file mode 100644 index 2ee377ceca..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2_7_fp32_fp32_integers.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const float* const input, // NOTE: Data in HWIO order - float* const output, - const int matrix_stride, - const int matrix_row_stride -) -{ - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const float *inptrs[kernel_cols]; - for (int j = 0; j < kernel_cols; j++) - { - inptrs[j] = input + j*weight_col_stride; - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - float *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; - for (; channels_remaining; channels_remaining--) - { - // Matrices used and computed in this kernel - float w[kernel_cols], V[inner_tile_cols]; - - // Read weights - for (int j = 0; j < kernel_cols; j++) - { - w[j] = *(inptrs[j]++); - } - - // Compute V = w WT - V[0] = (w[0]*-1) / 36.0f; - V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f; - V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f; - V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f; - V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f; - V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f; - V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f; - V[7] = (w[6]*1) / 1.0f; - - // Store the transformed weights - for 
(int j = 0; j < inner_tile_cols; j++) - { - *(outptr + j*matrix_stride) = V[j]; - } - outptr++; - } - } -} - -template class WeightTransform<1, 7, 1, 8, float, float, WinogradRoots::Integers>; -template class WeightTransform<7, 1, 8, 1, float, float, WinogradRoots::Integers>; - -} // namespace winograd diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp deleted file mode 100644 index 3fde4a7a6b..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_3x3_fp32_fp32_integers.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const float* const input, - float* const output, - const int matrix_stride, - const int matrix_row_stride -) -{ - constexpr int inner_tile_i = 4; - constexpr int inner_tile_j = 4; - - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const auto weight_row_stride = 3 * weight_col_stride; - const float *inptrs[3][3]; - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride; - } - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - float *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; -#ifdef __aarch64__ - for (; channels_remaining >= 4; channels_remaining -= 4) - { - // Matrices used and computed in this kernel - float32x4_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = vld1q_f32(inptrs[i][j]); - inptrs[i][j] += 4; - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - Ww[0][j] = w[0][j]; - - // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]); - Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); - - // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]); - Ww[2][j] = vmulq_n_f32(vaddq_f32(vsubq_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); - - Ww[3][j] = w[2][j]; - } - - // Compute V = W w WT - for (int i = 0; i < inner_tile_i; i++) - { - V[i][0] = Ww[i][0]; - - // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]); - V[i][1] = vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); - - // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + 
Ww[i][2]); - V[i][2] = vmulq_n_f32(vaddq_f32(vsubq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); - - V[i][3] = Ww[i][2]; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < inner_tile_i; i++) - { - for (int j = 0; j < inner_tile_j; j++, m++) - { - vst1q_f32(outptr + m*matrix_stride, V[i][j]); - } - } - outptr += 4; - } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 2; channels_remaining -= 2) - { - // Matrices used and computed in this kernel - float32x2_t w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = vld1_f32(inptrs[i][j]); - inptrs[i][j] += 2; - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - Ww[0][j] = w[0][j]; - - // Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]); - Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); - - // Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]); - Ww[2][j] = vmul_n_f32(vadd_f32(vsub_f32(w[0][j], w[1][j]), w[2][j]), 0.5f); - - Ww[3][j] = w[2][j]; - } - - // Compute V = W w WT - for (int i = 0; i < inner_tile_i; i++) - { - V[i][0] = Ww[i][0]; - - // V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]); - V[i][1] = vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); - - // V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]); - V[i][2] = vmul_n_f32(vadd_f32(vsub_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), 0.5f); - - V[i][3] = Ww[i][2]; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < inner_tile_i; i++) - { - for (int j = 0; j < inner_tile_j; j++, m++) - { - vst1_f32(outptr + m*matrix_stride, V[i][j]); - } - } - outptr += 2; - } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) - { - // Matrices used and computed in this kernel - float w[3][3], Ww[inner_tile_i][3], V[inner_tile_i][inner_tile_j]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = 
*(inptrs[i][j]++); - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - Ww[0][j] = w[0][j]; - Ww[1][j] = 0.5*(w[0][j] + w[1][j] + w[2][j]); - Ww[2][j] = 0.5*(w[0][j] - w[1][j] + w[2][j]); - Ww[3][j] = w[2][j]; - } - - // Compute V = W w WT - for (int i = 0; i < inner_tile_i; i++) - { - V[i][0] = Ww[i][0]; - V[i][1] = 0.5*(Ww[i][0] + Ww[i][1] + Ww[i][2]); - V[i][2] = 0.5*(Ww[i][0] - Ww[i][1] + Ww[i][2]); - V[i][3] = Ww[i][2]; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < inner_tile_i; i++) - { - for (int j = 0; j < inner_tile_j; j++, m++) - { - *(outptr + m*matrix_stride) = V[i][j]; - } - } - outptr++; - } - } -} - -template class WeightTransform<3, 3, 4, 4, float, float, WinogradRoots::Integers>; - -} // namespace diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp deleted file mode 100644 index 26ab56f24e..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_2x2_5x5_fp32_fp32_integers.cpp +++ /dev/null @@ -1,401 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const float* const input, - float* const output, - const int matrix_stride, - const int matrix_row_stride -) -{ - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const auto weight_row_stride = 5 * weight_col_stride; - const float *inptrs[5][5]; - for (int i = 0; i < 5; i++) - { - for (int j = 0; j < 5; j++) - { - inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride; - } - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - float *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; -#ifdef __aarch64__ - for (; channels_remaining >= 4; channels_remaining -= 4) - { - // Matrices used and computed in this kernel - float32x4_t w[5][5], Ww[6][5], V[6][6]; - - // Read weights - for (int i = 0; i < 5; i++) - { - for (int j = 0; j < 5; j++) - { - w[i][j] = vld1q_f32(inptrs[i][j]); - inptrs[i][j] += 4; - } - } - - // Compute the matrix W w - for (int j = 0; j < 5; j++) - { - // Ww[0][j] = w[0][j]/4.0f; - Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f); - - // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f; - Ww[1][j] = vmulq_n_f32( - vaddq_f32( - vaddq_f32( - 
vaddq_f32(w[1][j], w[0][j]), - vaddq_f32(w[3][j], w[2][j]) - ), - w[4][j] - ), - -1.0f/6.0f - ); - - // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f; - // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f; - Ww[2][j] = vmulq_n_f32( - vsubq_f32( - vaddq_f32( - vsubq_f32(w[1][j], w[0][j]), - vsubq_f32(w[3][j], w[2][j]) - ), - w[4][j] - ), - 1.0f/6.0f - ); - - // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f; - Ww[3][j] = vmulq_n_f32( - vmlaq_n_f32( - vaddq_f32( - vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)), - vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) - ), - w[4][j], 2.0f - ), - 1.0f/3.0f - ); - - // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f; - Ww[4][j] = vmulq_n_f32( - vmlaq_n_f32( - vaddq_f32( - vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)), - vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) - ), - w[4][j], 2.0f - ), - 1.0f/3.0f - ); - - // Ww[5][j] = w[4][j]; - Ww[5][j] = w[4][j]; - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - // V[i][0] = Ww[i][0]/4.0f; - V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f); - - // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f; - V[i][1] = vmulq_n_f32( - vaddq_f32( - vaddq_f32( - vaddq_f32(Ww[i][1], Ww[i][0]), - vaddq_f32(Ww[i][3], Ww[i][2]) - ), - Ww[i][4] - ), - -1.0f/6.0f - ); - - // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f; - // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f; - V[i][2] = vmulq_n_f32( - vsubq_f32( - vaddq_f32( - vsubq_f32(Ww[i][1], Ww[i][0]), - vsubq_f32(Ww[i][3], Ww[i][2]) - ), - Ww[i][4] - ), - 1.0f/6.0f - ); - - // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f; - V[i][3] = vmulq_n_f32( - vmlaq_n_f32( - vaddq_f32( - vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 
1.0f/4.0f)), - vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) - ), - Ww[i][4], 2.0f - ), - 1.0f/3.0f - ); - - // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f; - V[i][4] = vmulq_n_f32( - vmlaq_n_f32( - vaddq_f32( - vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)), - vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) - ), - Ww[i][4], 2.0f - ), - 1.0f/3.0f - ); - - // V[i][5] = Ww[i][4]; - V[i][5] = Ww[i][4]; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; j < 6; j++, m++) - { - vst1q_f32(outptr + m*matrix_stride, V[i][j]); - } - } - outptr += 4; - } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 2; channels_remaining -= 2) - { - // Matrices used and computed in this kernel - float32x2_t w[5][5], Ww[6][5], V[6][6]; - - // Read weights - for (int i = 0; i < 5; i++) - { - for (int j = 0; j < 5; j++) - { - w[i][j] = vld1_f32(inptrs[i][j]); - inptrs[i][j] += 2; - } - } - - // Compute the matrix W w - for (int j = 0; j < 5; j++) - { - // Ww[0][j] = w[0][j]/4.0f; - Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f); - - // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f; - Ww[1][j] = vmul_n_f32( - vadd_f32( - vadd_f32( - vadd_f32(w[1][j], w[0][j]), - vadd_f32(w[3][j], w[2][j]) - ), - w[4][j] - ), - -1.0f/6.0f - ); - - // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f; - // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f; - Ww[2][j] = vmul_n_f32( - vsub_f32( - vadd_f32( - vsub_f32(w[1][j], w[0][j]), - vsub_f32(w[3][j], w[2][j]) - ), - w[4][j] - ), - 1.0f/6.0f - ); - - // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f; - Ww[3][j] = vmul_n_f32( - vmla_n_f32( - vadd_f32( - vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)), - vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) - ), - w[4][j], 2.0f - ), - 
1.0f/3.0f - ); - - // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f; - Ww[4][j] = vmul_n_f32( - vmla_n_f32( - vadd_f32( - vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)), - vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j]) - ), - w[4][j], 2.0f - ), - 1.0f/3.0f - ); - - // Ww[5][j] = w[4][j]; - Ww[5][j] = w[4][j]; - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - // V[i][0] = Ww[i][0]/4.0f; - V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f); - - // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f; - V[i][1] = vmul_n_f32( - vadd_f32( - vadd_f32( - vadd_f32(Ww[i][1], Ww[i][0]), - vadd_f32(Ww[i][3], Ww[i][2]) - ), - Ww[i][4] - ), - -1.0f/6.0f - ); - - // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f; - // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f; - V[i][2] = vmul_n_f32( - vsub_f32( - vadd_f32( - vsub_f32(Ww[i][1], Ww[i][0]), - vsub_f32(Ww[i][3], Ww[i][2]) - ), - Ww[i][4] - ), - 1.0f/6.0f - ); - - // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f; - V[i][3] = vmul_n_f32( - vmla_n_f32( - vadd_f32( - vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)), - vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) - ), - Ww[i][4], 2.0f - ), - 1.0f/3.0f - ); - - // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f; - V[i][4] = vmul_n_f32( - vmla_n_f32( - vadd_f32( - vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)), - vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3]) - ), - Ww[i][4], 2.0f - ), - 1.0f/3.0f - ); - - // V[i][5] = Ww[i][4]; - V[i][5] = Ww[i][4]; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; j < 6; j++, m++) - { - vst1_f32(outptr + m*matrix_stride, V[i][j]); - } - } - outptr += 2; - } -#endif // __arm_any__ - for (; channels_remaining; 
channels_remaining--) - { - // Matrices used and computed in this kernel - float w[5][5], Ww[6][5], V[6][6]; - - // Read weights - for (int i = 0; i < 5; i++) - { - for (int j = 0; j < 5; j++) - { - w[i][j] = *(inptrs[i][j]++); - } - } - - // Compute the matrix W w - for (int j = 0; j < 5; j++) - { - Ww[0][j] = w[0][j]/4.0f; - Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f; - Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f; - Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f; - Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f; - Ww[5][j] = w[4][j]; - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - V[i][0] = Ww[i][0]/4.0f; - V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f; - V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f; - V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f; - V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f; - V[i][5] = Ww[i][4]; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; j < 6; j++, m++) - { - *(outptr + m*matrix_stride) = V[i][j]; - } - } - outptr++; - } - } -} - -template class WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>; - -} // namespace winograd diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp deleted file mode 100644 index eeda274453..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4_5_fp32_fp32_integers.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const float* const input, // NOTE: Data in HWIO order - float* const output, - const int matrix_stride, - const int matrix_row_stride -) -{ - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const float *inptrs[kernel_cols]; - for (int j = 0; j < kernel_cols; j++) - { - inptrs[j] = input + j*weight_col_stride; - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - float *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; - for (; channels_remaining; channels_remaining--) - { - // Matrices used and computed in this kernel - float w[kernel_cols], V[inner_tile_cols]; - - // Read weights - for (int j = 0; j < kernel_cols; j++) - { - w[j] = *(inptrs[j]++); - } - - // Compute V = w WT - V[0] = (w[0]*-1) / 36; - V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48; - V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48; - V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120; - V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120; - V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720; - V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720; - V[7] = (w[4]*1) / 1; - - // Store the transformed weights - for (int j = 0; j < inner_tile_cols; j++) - { - *(outptr + j*matrix_stride) = V[j]; - } - outptr++; - } - } -} - -template class WeightTransform<1, 5, 1, 8, float, float, WinogradRoots::Integers>; -template class WeightTransform<5, 1, 8, 1, float, float, WinogradRoots::Integers>; - -} // namespace winograd diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp 
b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp deleted file mode 100644 index 7c2c718bd5..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_4x4_3x3_fp32_fp32_integers.cpp +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const float* const input, // NOTE: Data in HWIO order - float* const output, - const int matrix_stride, - const int matrix_row_stride -) -{ - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const auto weight_row_stride = 3 * weight_col_stride; - const float *inptrs[3][3]; - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride; - } - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - float *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; -#ifdef __aarch64__ - for (; channels_remaining >= 4; channels_remaining -= 4) - { - // Matrices used and computed in this kernel - float32x4_t w[3][3], Ww[6][3], V[6][6]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = vld1q_f32(inptrs[i][j]); - inptrs[i][j] += 4; - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - // Ww[0][j] = 6*w[0][j]; - Ww[0][j] = vmulq_n_f32(w[0][j], 6.0); - - // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; - Ww[1][j] = vmulq_n_f32(vaddq_f32(vaddq_f32(w[0][j], w[1][j]), w[2][j]), -4.0); - - // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; - Ww[2][j] = vmulq_n_f32(vsubq_f32(vsubq_f32(w[1][j], w[0][j]), w[2][j]), 4.0); - - // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; - Ww[3][j] = vmlaq_n_f32(vmlaq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); - - // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; - Ww[4][j] = vmlaq_n_f32(vmlsq_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); - - // Ww[5][j] = 24*w[2][j]; - Ww[5][j] = vmulq_n_f32(w[2][j], 
24.0f); - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - const float recip576 = 1.0f / 576.0f; - - // V[i][0] = 6*Ww[i][0]; - V[i][0] = vmulq_n_f32(vmulq_n_f32(Ww[i][0], 6.0), recip576); - - // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]; - V[i][1] = vmulq_n_f32(vmulq_n_f32(vaddq_f32(vaddq_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576); - - // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]; - V[i][2] = vmulq_n_f32(vmulq_n_f32(vsubq_f32(vsubq_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576); - - // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]; - V[i][3] = vmulq_n_f32(vmlaq_n_f32(vmlaq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); - - // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]; - V[i][4] = vmulq_n_f32(vmlaq_n_f32(vmlsq_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); - - // V[i][5] = 24*Ww[i][2]; - V[i][5] = vmulq_n_f32(vmulq_n_f32(Ww[i][2], 24.0f), recip576); - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; j < 6; j++, m++) - { - vst1q_f32(outptr + m*matrix_stride, V[i][j]); - } - } - outptr += 4; - } -#endif // __aarch64__ -#ifdef __arm_any__ - for (; channels_remaining >= 2; channels_remaining -= 2) - { - // Matrices used and computed in this kernel - float32x2_t w[3][3], Ww[6][3], V[6][6]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = vld1_f32(inptrs[i][j]); - inptrs[i][j] += 2; - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - // Ww[0][j] = 6*w[0][j]; - Ww[0][j] = vmul_n_f32(w[0][j], 6.0); - - // Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; - Ww[1][j] = vmul_n_f32(vadd_f32(vadd_f32(w[0][j], w[1][j]), w[2][j]), -4.0); - - // Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; - Ww[2][j] = vmul_n_f32(vsub_f32(vsub_f32(w[1][j], w[0][j]), w[2][j]), 4.0); - - // Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; - Ww[3][j] = vmla_n_f32(vmla_n_f32(w[0][j], w[1][j], 2.0f), 
w[2][j], 4.0f); - - // Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; - Ww[4][j] = vmla_n_f32(vmls_n_f32(w[0][j], w[1][j], 2.0f), w[2][j], 4.0f); - - // Ww[5][j] = 24*w[2][j]; - Ww[5][j] = vmul_n_f32(w[2][j], 24.0f); - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - const float recip576 = 1.0f / 576.0f; - - // V[i][0] = 6*Ww[i][0]; - V[i][0] = vmul_n_f32(vmul_n_f32(Ww[i][0], 6.0), recip576); - - // V[i][1] = -4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]; - V[i][1] = vmul_n_f32(vmul_n_f32(vadd_f32(vadd_f32(Ww[i][0], Ww[i][1]), Ww[i][2]), -4.0), recip576); - - // V[i][2] = -4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]; - V[i][2] = vmul_n_f32(vmul_n_f32(vsub_f32(vsub_f32(Ww[i][1], Ww[i][0]), Ww[i][2]), 4.0), recip576); - - // V[i][3] = 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]; - V[i][3] = vmul_n_f32(vmla_n_f32(vmla_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); - - // V[i][4] = 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]; - V[i][4] = vmul_n_f32(vmla_n_f32(vmls_n_f32(Ww[i][0], Ww[i][1], 2.0f), Ww[i][2], 4.0f), recip576); - - // V[i][5] = 24*Ww[i][2]; - V[i][5] = vmul_n_f32(vmul_n_f32(Ww[i][2], 24.0f), recip576); - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; j < 6; j++, m++) - { - vst1_f32(outptr + m*matrix_stride, V[i][j]); - } - } - outptr += 2; - } -#endif // __arm_any__ - for (; channels_remaining; channels_remaining--) - { - // Matrices used and computed in this kernel - float w[3][3], Ww[6][3], V[6][6]; - - // Read weights - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 3; j++) - { - w[i][j] = *(inptrs[i][j]++); - } - } - - // Compute the matrix W w - for (int j = 0; j < 3; j++) - { - Ww[0][j] = 6*w[0][j]; - Ww[1][j] = -4*w[0][j] + -4*w[1][j] + -4*w[2][j]; - Ww[2][j] = -4*w[0][j] + 4*w[1][j] + -4*w[2][j]; - Ww[3][j] = 1*w[0][j] + 2*w[1][j] + 4*w[2][j]; - Ww[4][j] = 1*w[0][j] + -2*w[1][j] + 4*w[2][j]; - Ww[5][j] = 24*w[2][j]; - } - - // Compute V = W w WT - for (int i = 0; i < 6; i++) - { - 
V[i][0] = ( 6*Ww[i][0]) / 576.0; - V[i][1] = (-4*Ww[i][0] + -4*Ww[i][1] + -4*Ww[i][2]) / 576.0; - V[i][2] = (-4*Ww[i][0] + 4*Ww[i][1] + -4*Ww[i][2]) / 576.0; - V[i][3] = ( 1*Ww[i][0] + 2*Ww[i][1] + 4*Ww[i][2]) / 576.0; - V[i][4] = ( 1*Ww[i][0] + -2*Ww[i][1] + 4*Ww[i][2]) / 576.0; - V[i][5] = (24*Ww[i][2]) / 576.0; - } - - // Store the transformed weights - for (int i = 0, m = 0; i < 6; i++) - { - for (int j = 0; j < 6; j++, m++) - { - *(outptr + m*matrix_stride) = V[i][j]; - } - } - outptr++; - } - } -} - -template class WeightTransform<3, 3, 6, 6, float, float, WinogradRoots::Integers>; - -} // namespace diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp deleted file mode 100644 index 9b42224eaf..0000000000 --- a/src/core/NEON/kernels/convolution/winograd/winograd_transforms/weights_6_3_fp32_fp32_integers.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm.hpp" -#include "kernel.hpp" - -namespace winograd -{ - -template <> -void WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>::execute( - const int n_output_channels, - const int n_input_channels, - const float* const input, // NOTE: Data in HWIO order - float* const output, - const int matrix_stride, - const int matrix_row_stride -) -{ - // Get pointers to each cell of the weight tensor - const auto weight_col_stride = n_input_channels * n_output_channels; - const float *inptrs[3]; - for (int j = 0; j < 3; j++) - { - inptrs[j] = input + j*weight_col_stride; - } - - // For each input channel - for (int ic = 0; ic < n_input_channels; ic++) - { - float *outptr = output + ic * matrix_row_stride; - - // For each output channel - int channels_remaining = n_output_channels; - for (; channels_remaining; channels_remaining--) - { - // Matrices used and computed in this kernel - float w[3], V[inner_tile_cols]; - - // Read weights - for (int j = 0; j < 3; j++) - { - w[j] = *(inptrs[j]++); - } - - // Compute V = w WT - V[0] = (w[0]*-1) / 36.0f; - V[1] = (w[1]*-1 + w[0]*1 + w[2]*1) / 48.0f; - V[2] = (w[0]*1 + w[1]*1 + w[2]*1) / 48.0f; - V[3] = (w[0]*-1 + w[2]*-4 + w[1]*2) / 120.0f; - V[4] = (w[0]*-1 + w[2]*-4 + w[1]*-2) / 120.0f; - V[5] = (w[1]*-3 + w[2]*9 + w[0]*1) / 720.0f; - V[6] = (w[1]*3 + w[2]*9 + w[0]*1) / 720.0f; - V[7] = (w[2]*1) / 1; - - // Store the transformed weights - for (int j = 0; j < inner_tile_cols; j++) - { - *(outptr + j*matrix_stride) = V[j]; - } - outptr++; - } - } -} - -template class WeightTransform<1, 3, 1, 8, float, float, WinogradRoots::Integers>; -template class WeightTransform<3, 1, 8, 1, float, float, WinogradRoots::Integers>; - -} // 
namespace |