/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "depthwise_depthfirst.hpp"
#include "interleaves/generic_quantized_dot_product.hpp"

#include <functional>

namespace arm_conv {
namespace depthwise {

template <typename TInput, typename TWeight, typename TOutput, typename TAccum, typename OutputStage>
class DepthfirstMultiplierStrategy : public DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>
{
  using Parent = DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>;

  protected:
  virtual interleaves::PackingArguments get_packing_args(const DepthwiseArgs &args) const
  {
    return interleaves::PackingArguments(
      args.kernel_rows, args.kernel_cols, sizeof(TWeight),
      true, sizeof(TAccum), this->uses_premultiply(),
      this->get_vl_type(), sizeof(TAccum), 1,
      [args] (unsigned int pos, unsigned int &x, unsigned int &y) -> bool
      {
        if (pos < args.kernel_rows * args.kernel_cols)
        {
          y = pos % args.kernel_cols;
          x = pos / args.kernel_cols;
          return true;
        }
        return false;
      }
    );
  }

  bool uses_premultiply() const override {
    return false;
  }

  public:
  using Parent::Parent;

  size_t get_storage_size(const DepthwiseArgs &args) const override
  {
    return interleaves::get_storage_size_generic(this->get_packing_args(args), args);
  }

  void pack_parameters(const DepthwiseArgs &args, void *buffer, const void *biases, const Nothing &, const void *weights, size_t ld_weight_col, size_t ld_weight_row) const override
  {
    interleaves::pack_parameters_generic(
      this->get_packing_args(args), args,
      buffer, biases, weights, ld_weight_col, ld_weight_row
    );
  }

  using KernelType = std::function<void(
    const TInput *const *,  // Input pointers
    TOutput *const *,       // Output pointers
    const void *,           // Ravelled bias and weight parameters
    unsigned int,           // Number of output channels
    TAccum, TAccum          // Activation minimum and maximum
  )>;
  virtual KernelType get_kernel(void) const = 0;
};

template <typename TInput, typename TWeight, typename TOutput>
class DepthfirstMultiplierStrategy<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32> : public DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>
{
  using Parent = DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>;

  public:
  using Parent::Parent;

  size_t get_storage_size(const DepthwiseArgs &args) const override
  {
    return interleaves::quantized::get_storage_size(args, this->get_vl_type(), this->get_accumulator_depth_vl());
  }

  void pack_parameters(const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, const void *weights, size_t ld_weight_col, size_t ld_weight_row) const override
  {
    interleaves::quantized::pack_parameters<TWeight>(
      buffer,
      reinterpret_cast<const int32_t *>(biases),
      reinterpret_cast<const TWeight *>(weights),
      ld_weight_col, ld_weight_row,
      args, qp,
      this->get_vl_type(), this->get_accumulator_depth_vl()
    );
  }

  using KernelType = std::function<void(
    const TInput *const *,  // Input pointers
    TOutput *const *,       // Output pointers
    const void *,           // Ravelled bias, weight, and quantization parameters
    unsigned int,           // Number of output channels
    const arm_gemm::Requantize32 &
  )>;
  virtual KernelType get_kernel(void) const = 0;
};

template <typename TInput, typename TWeight, typename TOutput, typename TAccum>
class GenericDepthfirstMultiplierKernelStrategy
{
  const arm_gemm::VLType m_vl_type;
  const unsigned int m_output_rows, m_output_cols;

  public:
  GenericDepthfirstMultiplierKernelStrategy(unsigned int output_rows, unsigned int output_cols, arm_gemm::VLType vl_type)
  : m_vl_type(vl_type), m_output_rows(output_rows), m_output_cols(output_cols)
  {
  }

  virtual ~GenericDepthfirstMultiplierKernelStrategy() = default;

  arm_gemm::VLType get_vl_type(void) const { return m_vl_type; }
  unsigned int get_output_rows(void) const { return m_output_rows; }
  unsigned int get_output_cols(void) const { return m_output_cols; }

  using KernelType = std::function<void(
    const TInput *const *,  // Input pointers
    TOutput *const *,       // Output pointers
    const TWeight *,        // Ravelled weight parameters
    const TAccum *,         // Bias
    unsigned int,           // Number of kernel points
    unsigned int,           // Number of output channels
    TAccum, TAccum          // Activation minimum and maximum
  )>;
  virtual KernelType get_kernel(void) const = 0;
};

template <typename TInput, typename TWeight, typename TOutput>
class GenericDepthfirstMultiplierKernelStrategy<TInput, TWeight, TOutput, int32_t>
{
  const arm_gemm::VLType m_vl_type;
  const unsigned int m_output_rows, m_output_cols;

  public:
  GenericDepthfirstMultiplierKernelStrategy(unsigned int output_rows, unsigned int output_cols, arm_gemm::VLType vl_type)
  : m_vl_type(vl_type), m_output_rows(output_rows), m_output_cols(output_cols)
  {
  }

  virtual ~GenericDepthfirstMultiplierKernelStrategy() = default;

  arm_gemm::VLType get_vl_type(void) const { return m_vl_type; }
  unsigned int get_output_rows(void) const { return m_output_rows; }
  unsigned int get_output_cols(void) const { return m_output_cols; }

  using KernelType = std::function<void(
    const TInput *const *,  // Input pointers
    TOutput *const *,       // Output pointers
    const TWeight *,        // Ravelled weight parameters
    const int32_t *,        // Bias
    unsigned int,           // Number of kernel points
    unsigned int,           // Number of output channels
    const int32_t *,        // Per-channel left shifts
    const int32_t *,        // Per-channel multipliers
    const int32_t *,        // Per-channel right shifts
    const arm_gemm::Requantize32 &
  )>;
  virtual KernelType get_kernel(void) const = 0;
};

template <typename TInput, typename TWeight, typename TOutput,
          typename TAccum=typename DefaultTAccum<TOutput>::Type,
          typename OutputStage=typename DefaultOutputStage<TOutput>::Type>
class GenericDepthfirstMultiplierStrategy : public DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>
{
  using KernelStrategyType = GenericDepthfirstMultiplierKernelStrategy<TInput, TWeight, TOutput, TAccum>;
  std::unique_ptr<KernelStrategyType> m_kern;

  protected:
  virtual interleaves::PackingArguments get_packing_args(const DepthwiseArgs &args) const
  {
    return interleaves::PackingArguments(
      args.kernel_rows, args.kernel_cols, sizeof(TWeight),
      false, sizeof(TAccum), this->uses_premultiply(),
      this->get_vl_type(), sizeof(TAccum), 1,
      [args] (unsigned int pos, unsigned int &x, unsigned int &y) -> bool
      {
        if (pos < args.kernel_rows * args.kernel_cols)
        {
          y = pos % args.kernel_cols;
          x = pos / args.kernel_cols;
          return true;
        }
        return false;
      }
    );
  }

  bool uses_premultiply() const override {
    return false;
  }

  public:
  GenericDepthfirstMultiplierStrategy(KernelStrategyType *kern, const DepthwiseArgs &args)
  : DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>(
      kern->get_output_rows(), kern->get_output_cols(),
      args.kernel_rows, args.kernel_cols,
      args.stride_rows, args.stride_cols
    ),
    m_kern(kern)
  {
  }

  arm_gemm::VLType get_vl_type(void) const override { return m_kern->get_vl_type(); }
  const typename KernelStrategyType::KernelType get_kernel(void) const { return m_kern->get_kernel(); }

  size_t get_storage_size(const DepthwiseArgs &args) const override
  {
    return interleaves::get_storage_size_generic(this->get_packing_args(args), args);
  }

  void pack_parameters(const DepthwiseArgs &args, void *buffer, const void *biases, const OutputStage &, const void *weights, size_t ld_weight_col, size_t ld_weight_row) const override
  {
    interleaves::pack_parameters_generic(
      this->get_packing_args(args), args,
      buffer, biases, weights, ld_weight_col, ld_weight_row
    );
  }
};

// Specialise elements of the wrapper based on the type of kernel.
namespace depthfirst_multiplier {

/* Working space element which contains a pointer for each row of input, a row
 * of padding, and a space which can be used to construct an NCHW-ordered patch
 * of input.
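 *
 * The buffer carved up by initialise() below holds these three pieces in
 * order: first the array of input row pointers, then the padding row (filled
 * via get_input_buffer_fill_value() so that padded elements read as the
 * appropriate "zero"), and finally the NCHW-ordered input patch itself.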
 */
template <typename T, bool IsGeneric, typename OutputStage>
class InputPatchElement
{
  public:
  struct Workspace
  {
    constexpr static bool InputPatchIsGeneric = IsGeneric;
    const T **input_rows;
    T *input_padding;
    T *input_patch;
  };

  static size_t get_element_size(const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    return sizeof_input_rows(args) + sizeof_input_padding(args) + sizeof_input_patch(args);
  }

  template <class WorkspaceType>
  static void *initialise(WorkspaceType *ws, void *buffer, const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    auto buffer_bytes = reinterpret_cast<char *>(buffer);

    ws->input_rows = reinterpret_cast<const T **>(buffer_bytes);
    buffer_bytes += sizeof_input_rows(args);

    ws->input_padding = reinterpret_cast<T *>(buffer_bytes);
    buffer_bytes += sizeof_input_padding(args);

    ws->input_patch = reinterpret_cast<T *>(buffer_bytes);
    buffer_bytes += sizeof_input_patch(args);

    // Initialise the padding
    memset(ws->input_padding,
           get_input_buffer_fill_value(args.output_stage),
           sizeof_input_padding(args));

    return buffer_bytes;
  }

  protected:
  static size_t sizeof_input_rows(const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    if (IsGeneric)
    {
      return sizeof(T *) * args.strategy->get_output_rows() * args.depthwise_args.kernel_rows * args.depthwise_args.kernel_cols;
    }
    else
    {
      return sizeof(T *) * args.strategy->get_input_rows();
    }
  }

  static size_t sizeof_input_padding(const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    // Round-up the number of columns to be a whole number of QUADS
    auto input_cols = arm_gemm::roundup<size_t>(args.strategy->get_input_cols(), 16 / sizeof(T));
    return sizeof(T) * input_cols;
  }

  static size_t sizeof_input_patch(const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    if (IsGeneric)
    {
      // Round-up the number of columns to be a whole number of QUADS
      auto output_cols = arm_gemm::roundup<size_t>(args.strategy->get_output_cols(), 16 / sizeof(T));
      const auto kernel_points = args.depthwise_args.kernel_rows * args.depthwise_args.kernel_cols;
      return sizeof(T) * kernel_points * args.strategy->get_output_rows() * output_cols;
    }
    else
    {
      // Round-up the number of columns to be a whole number of QUADS
      auto input_cols = arm_gemm::roundup<size_t>(args.strategy->get_input_cols(), 16 / sizeof(T));
      return sizeof(T) * args.strategy->get_input_rows() * input_cols;
    }
  }
};

template <bool IsGeneric, typename TInput, typename TWeight, typename TOutput, typename TAccum, typename OutputStage>
struct StrategyType
{
  using Type = DepthfirstMultiplierStrategy<TInput, TWeight, TOutput, TAccum, OutputStage>;

  template <typename WorkspaceType>
  static void execute(
    const DepthwiseArgs &args, const WorkspaceType *ws, const Type *strat,
    const OutputStage &, const unsigned int,
    const void *parameters, const void *
  )
  {
    strat->get_kernel()(
      ws->input_rows,
      ws->outptr_array,
      parameters, args.channel_multiplier,
      ws->activation_min, ws->activation_max
    );
  }
};

template <typename TInput, typename TWeight, typename TOutput, typename TAccum, typename OutputStage>
struct StrategyType<true, TInput, TWeight, TOutput, TAccum, OutputStage>
{
  using Type = GenericDepthfirstMultiplierStrategy<TInput, TWeight, TOutput, TAccum, OutputStage>;

  template <typename WorkspaceType>
  static void execute(
    const DepthwiseArgs &args, const WorkspaceType *ws, const Type *strat,
    const OutputStage &, const unsigned int start_output_channel,
    const void *parameters, const void *bias
  )
  {
    strat->get_kernel()(
      ws->input_rows, ws->outptr_array,
      reinterpret_cast<const TWeight *>(parameters),
      bias == nullptr
        ? nullptr
        : reinterpret_cast<const TAccum *>(bias) + start_output_channel,
      strat->get_kernel_rows() * strat->get_kernel_cols(),
      args.channel_multiplier,
      ws->activation_min, ws->activation_max
    );
  }
};

template <typename TInput, typename TWeight, typename TOutput>
struct StrategyType<false, TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>
{
  using Type = DepthfirstMultiplierStrategy<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>;

  template <typename WorkspaceType>
  static void execute(
    const DepthwiseArgs &args, const WorkspaceType *ws, const Type *strat,
    const arm_gemm::Requantize32 &qp, const unsigned int,
    const void *parameters, const void *
  )
  {
    strat->get_kernel()(
      ws->input_rows,
      ws->outptr_array,
      parameters, args.channel_multiplier,
      qp
    );
  }
};

template <typename TInput, typename TWeight, typename TOutput>
struct StrategyType<true, TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>
{
  using Type = GenericDepthfirstMultiplierStrategy<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>;

  template <typename WorkspaceType>
  static void execute(
    const DepthwiseArgs &args, const WorkspaceType *ws, const Type *strat,
    const arm_gemm::Requantize32 &qp, const unsigned int start_output_channel,
    const void *parameters, const void *
  )
  {
    auto get_ptr = [start_output_channel] (const int32_t *ptr) -> const int32_t *
    {
      return ptr == nullptr ? nullptr : ptr + start_output_channel;
    };

    strat->get_kernel()(
      ws->input_rows, ws->outptr_array,
      reinterpret_cast<const TWeight *>(parameters),
      get_ptr(qp.bias),
      strat->get_kernel_rows() * strat->get_kernel_cols(),
      args.channel_multiplier,
      get_ptr(qp.per_channel_left_shifts),
      get_ptr(qp.per_channel_muls),
      get_ptr(qp.per_channel_right_shifts),
      qp
    );
  }
};

template <bool IsGeneric> struct PrepareInputSample;

template <> struct PrepareInputSample<false>
{
  template <typename WorkspaceType, typename StrategyType, typename T>
  static void execute(
    const DepthwiseArgs &, WorkspaceType *ws, const StrategyType *strat,
    T *base_ptr, size_t ld_row, size_t ld_col,
    const unsigned int input_pad_top, const unsigned int valid_rows,
    const unsigned int input_pad_left, const unsigned int valid_cols
  )
  {
    fill_nchw_patch_array(
      ws->input_rows, ws->input_patch, strat->get_input_rows(), strat->get_input_cols(),
      base_ptr, ld_row, ld_col,
      ws->input_padding, input_pad_top, valid_rows,
      input_pad_left, valid_cols
    );
  }
};

template <> struct PrepareInputSample<true>
{
  template <typename WorkspaceType, typename StrategyType, typename T>
  static void execute(
    const DepthwiseArgs &args, WorkspaceType *ws, const StrategyType *strat,
    T *base_ptr, size_t ld_row, size_t ld_col,
    const unsigned int input_pad_top, const unsigned int valid_rows,
    const unsigned int input_pad_left, const unsigned int valid_cols
  )
  {
    fill_patch_array_generic_kernel(
      ws->input_rows, ws->input_patch,
      strat->get_output_rows(), strat->get_output_cols(),
      args.kernel_rows, args.kernel_cols,
      args.stride_rows, args.stride_cols,
      base_ptr, ld_row, ld_col,
      ws->input_padding, input_pad_top, valid_rows,
      input_pad_left, valid_cols
    );
  }
};

}  // namespace depthfirst_multiplier

template <typename TInput,
          typename TWeight=TInput,
          typename TOutput=TInput,
          typename TAccum=typename DefaultTAccum<TOutput>::Type,
          bool is_generic=false,
          typename OutputStage=typename DefaultOutputStage<TOutput>::Type>
class DepthwiseDepthfirstMultiplier : public DepthfirstDriver<TInput, TWeight, TOutput>
{
  protected:
  using StratType = typename depthfirst_multiplier::StrategyType<is_generic, TInput, TWeight, TOutput, TAccum, OutputStage>::Type;
  using WorkspaceManager = Workspace<
    OutputArrayElement<TOutput>,
    depthfirst_multiplier::InputPatchElement<TInput, is_generic, OutputStage>,
    ActivationsElement<TAccum, OutputStage>
  >;
  using WorkingSpace = typename WorkspaceManager::WorkspaceType;

  OutputStage m_os;  // Copy of the output parameters
  const void *m_bias = nullptr;  // Copy of the bias (should we need it)

  bool uses_premultiply() const override {
    return false;
  }

  public:
  DepthwiseDepthfirstMultiplier(StratType *const strat, const DepthwiseArgs &args, const OutputStage &os = {})
  : DepthfirstDriver<TInput, TWeight, TOutput>(strat, args), m_os(os)
  {
  }

  DepthwiseDepthfirstMultiplier(DepthwiseDepthfirstMultiplier &) = delete;
  DepthwiseDepthfirstMultiplier &operator=(DepthwiseDepthfirstMultiplier &) = delete;

  size_t get_storage_size(void) const override
  {
    return reinterpret_cast<const StratType *>(this->m_strat.get())
      ->get_storage_size(this->m_args);
  }

  void pack_parameters(void *buffer, const void *biases, const void *weights, size_t ld_weight_col, size_t ld_weight_row) override
  {
    reinterpret_cast<const StratType *>(this->m_strat.get())
      ->pack_parameters(this->m_args, buffer, biases, m_os, weights, ld_weight_col, ld_weight_row);
    m_bias = biases;
    depthwise_depthfirst::stash_bias(m_os, biases);
  }

  size_t get_working_size_per_thread() const override
  {
    DepthwiseArgs args(this->m_args);
    return WorkspaceManager::get_sizeof_workspace(WorkspaceArgs<IDepthfirstStrategy, OutputStage>(this->m_strat.get(), args, m_os));
  }

  void initialise_working_space(void *buffer) const override
  {
    DepthwiseArgs args(this->m_args);
    return WorkspaceManager::initialise(buffer, WorkspaceArgs<IDepthfirstStrategy, OutputStage>(this->m_strat.get(), args, m_os));
  }

  void compute_tile_padded(
    const DepthwiseArgs &args,
    unsigned int output_i, unsigned int output_j,
    unsigned int output_channel_start, unsigned int output_channel_end,
    const TensorSpec<const TInput *> &input,
    const TensorSpec<TOutput *> &output,
    const void *parameters,
    void *working_space_raw
  ) const override
  {
    // Get the working space
    auto ws = reinterpret_cast<WorkingSpace *>(working_space_raw);

    const int ii = static_cast<int>(output_i * args.stride_rows) - args.padding.top;
    const auto input_pad_top = static_cast<unsigned int>(ii < 0 ? -ii : 0);
    const auto input_i = static_cast<unsigned int>(ii < 0 ? 0 : ii);

    const int ij = static_cast<int>(output_j * args.stride_cols) - args.padding.left;
    const auto input_pad_left = static_cast<unsigned int>(ij < 0 ? -ij : 0);
    const auto input_j = static_cast<unsigned int>(ij < 0 ? 0 : ij);

    // Compute the output pointer array. We'll update this array after every
    // invocation of the kernel.
    fill_pointer_array(
      ws->outptr_array, this->m_strat->get_output_rows(), this->m_strat->get_output_cols(),
      output.base + output_i*output.ld_row + output_j*output.ld_col + output_channel_start,
      output.ld_row, output.ld_col,
      ws->output_buffer,
      0, args.output_rows - output_i,  // Top padding, # valid rows
      0, args.output_cols - output_j   // Left padding, # valid columns
    );

    // Compute the parameter stride
    DepthwiseArgs single_iter(args);
    single_iter.input_channels = 1;
    const size_t parameter_stride = reinterpret_cast<const StratType *>(this->m_strat.get())
      ->get_storage_size(single_iter);

    for (; output_channel_start < output_channel_end;
         output_channel_start += args.channel_multiplier)
    {
      // Compute the input pointer array
      const auto input_channel = output_channel_start / args.channel_multiplier;

      // Construct the input patch
      depthfirst_multiplier::PrepareInputSample<is_generic>::execute(
        args, ws, this->m_strat.get(),
        input.base + input_channel + input_i*input.ld_row + input_j*input.ld_col, input.ld_row, input.ld_col,
        input_pad_top, args.input_rows - input_i,
        input_pad_left, args.input_cols - input_j
      );

      // Execute the kernel
      depthfirst_multiplier::StrategyType<is_generic, TInput, TWeight, TOutput, TAccum, OutputStage>::execute(
        args, ws, reinterpret_cast<const StratType *>(this->m_strat.get()), m_os, output_channel_start,
        parameters, m_bias
      );

      // Update the output pointers
      for (unsigned int n = 0; n < this->m_strat->get_output_rows() * this->m_strat->get_output_cols(); n++)
      {
        ws->outptr_array[n] += args.channel_multiplier;
      }

      // Progress the parameters
      parameters = reinterpret_cast<const char *>(parameters) + parameter_stride;
    }
  }
};

}  // namespace depthwise
}  // namespace arm_conv
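// A rough call sequence for the wrapper above, as an illustrative sketch only
// (not part of this header's API surface). The strategy object `strat`, the
// DepthwiseArgs `args`, the weight/bias pointers and their strides are assumed
// to be provided by the caller; execution itself is then driven through the
// DepthfirstDriver interface:
//
//   DepthwiseDepthfirstMultiplier<float> dwc(strat, args);
//   std::vector<uint8_t> params(dwc.get_storage_size());
//   dwc.pack_parameters(params.data(), biases, weights, ld_weight_col, ld_weight_row);
//   std::vector<uint8_t> ws(dwc.get_working_size_per_thread());
//   dwc.initialise_working_space(ws.data());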