/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
#define __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__

#include "arm_compute/core/CPP/CPPTypes.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/WindowIterator.h"

namespace arm_compute
{
class ITensor;

/** Unit of work for @ref NEGEMMInterleavedTransformAWrapper to process */
struct TransformAWorkload
{
    /** Constructor
     *
     * @param[in] k0    First value to process along the K dimension.
     * @param[in] kmax  Last value to process along the K dimension.
     * @param[in] multi Multi index.
     */
    TransformAWorkload(unsigned int k0, unsigned int kmax, unsigned int multi)
        : _k0(k0), _kmax(kmax), _multi(multi)
    {
    }
    unsigned int _k0;    /**< First value to process along the K dimension. */
    unsigned int _kmax;  /**< Last value to process along the K dimension. */
    unsigned int _multi; /**< Multi index. */
};

/** Equivalent to arm_gemm::GemmInterleaved's Transform stage for matrix A, but using Compute Library types. */
class NEGEMMInterleavedTransformAWrapper
{
public:
    /** Transform the block of matrix A described by the given workload.
     *
     * @param[in] wl           Workload to process.
     * @param[in] info         Information about the current thread.
     * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
     * @param[in] start_offset Offset relative to the start of batch_window to begin the processing from.
     * @param[in] end_offset   Offset relative to the start of batch_window to stop the processing at.
     */
    virtual void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
    /** Generate the array of workloads to process.
     *
     * @param[out] workloads Container to store the generated workloads.
     */
    virtual void create_workloads(std::vector<TransformAWorkload> &workloads) = 0;
    /** Default destructor */
    virtual ~NEGEMMInterleavedTransformAWrapper() = default;
};
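// A minimal usage sketch (illustrative only, not part of the interface): the wrapper is
// driven in two steps. create_workloads() enumerates one workload per (K block, multi)
// pair, and each workload is then handed to transform() together with the batch window
// describing the M/batch range owned by the calling thread. The 'wrapper', 'thread_info',
// 'batch_window', 'start' and 'end' names below are assumed to be provided by the
// scheduler that owns this wrapper; they are not defined in this file.
//
//     std::vector<TransformAWorkload> workloads;
//     wrapper.create_workloads(workloads);
//     for(const TransformAWorkload &wl : workloads)
//     {
//         wrapper.transform(wl, thread_info, batch_window, start, end);
//     }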
/** Type specialisations of @ref NEGEMMInterleavedTransformAWrapper */
template <typename strategy>
class NEGEMMInterleavedTransformAWrapperTemplate : public NEGEMMInterleavedTransformAWrapper
{
public:
    /** Configure the reshape A routine.
     *
     * @param[in]  a                   Input matrix A.
     * @param[out] transformed_a       Reshaped matrix A.
     * @param[in]  transpose_a         Also transpose A?
     * @param[in]  reinterpret_a_as_3d Re-interpret A as 3D?
     * @param[in]  block_walker        Window representing the layout of the matrix's blocks.
     * @param[in]  params              M, N, K sizes.
     */
    void configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, bool reinterpret_a_as_3d, const Window &block_walker, const INEGEMMWrapperKernel::Params &params)
    {
        _a                   = a;
        _transformed_a       = transformed_a;
        _transpose_a         = transpose_a;
        _reinterpret_a_as_3d = reinterpret_a_as_3d;
        _Ksize               = params.K;
        _Msize               = params.M;
        _k_multi_window      = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension
    }

    // Inherited methods overridden:
    void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override
    {
        strategy                                        strat(info.cpu_info);
        TensorAccessor<typename strategy::operand_type> a(*_a);
        TensorAccessor<typename strategy::operand_type> transformed_a(*_transformed_a);

        // Handle 3d input re-interpretation
        if(_reinterpret_a_as_3d)
        {
            Strides a_strides_as_3d = _a->info()->strides_in_bytes();
            a_strides_as_3d.remove(Window::DimZ);
            a.set_strides(a_strides_as_3d);
        }

        unsigned int last_m = 0;
        //TODO: Create a new iterate_1D( DimY);
        int  last_y          = -1;
        // For each batch in the window, reshape the rows [first_m, last_m) and columns [k0, kmax) of A.
        auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
        {
            if(id.y() != last_y)
            {
                last_y               = id.y();
                unsigned int batch   = id.y();
                unsigned int first_m = id.x();

                if(first_m >= last_m)
                    return;

                strat.transforms.PrepareA(transformed_a(0, first_m, batch),
                                          a(0, 0, batch, wl._multi),
                                          a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a);
            }
        });
        auto on_new_row_size = [&](unsigned int, unsigned int end)
        {
            last_m = std::min(end, _Msize);
        };
        window_iterator.iterate_2D(on_new_row_size);
    }
    void create_workloads(std::vector<TransformAWorkload> &workloads) override
    {
        // One workload per (K block, multi) pair.
        execute_window_loop(_k_multi_window, [&](const Coordinates & id)
        {
            const unsigned int k0    = id.x();
            const unsigned int multi = id.y();
            const unsigned int kmax  = std::min(k0 + _k_multi_window.x().step(), _Ksize);

            workloads.push_back(TransformAWorkload(k0, kmax, multi));
        });
    }

private:
    const ITensor *_a{ nullptr };
    ITensor       *_transformed_a{ nullptr };
    unsigned int   _Msize{ 0 };
    unsigned int   _Ksize{ 0 };
    bool           _transpose_a{ false };
    bool           _reinterpret_a_as_3d{ false };
    Window         _k_multi_window{};
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__ */