author     Anthony Barbier <anthony.barbier@arm.com>  2018-07-06 17:05:59 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:54 +0000
commit     71d9b57aac146ae3ad5648c1308a872cea90070d (patch)
tree       b4f36f7c6a45782ef9e40fe4c6a9e9a68d716948 /arm_compute/runtime/NEON/functions
parent     d1794ebfa10d05af7d2458c5d506152fd38068d3 (diff)
download   ComputeLibrary-71d9b57aac146ae3ad5648c1308a872cea90070d.tar.gz
COMPMID-1381: Cleaned up the AssemblyHelper interface
Introduced a new IFunction for when we'll fork the arm_gemm functions.
Increased encapsulation and abstraction of which method is used.

Change-Id: I5fd8b14b5c77e7f8ecb09029b5e2eccd10dbdcf4
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/139108
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
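For context, the new dispatcher exposes the usual configure/prepare/run flow of NEON functions. A minimal usage sketch follows; the configure(), is_configured(), prepare() and run() signatures are taken from the new header below, while the helper function and tensor wiring are hypothetical:

    #include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"

    using namespace arm_compute;

    // Hypothetical helper: runs D = A * B through the assembly dispatcher if possible.
    bool try_assembly_gemm(const ITensor *a, const ITensor *b, ITensor *d)
    {
        NEGEMMAssemblyDispatchF32 asm_gemm;
        // pretranspose_hint = true lets prepare() pre-transpose matrix B once up front.
        asm_gemm.configure(a, b, d, 1.0f /* alpha */, 0.0f /* beta */, true /* pretranspose_hint */);
        if(!asm_gemm.is_configured())
        {
            return false; // No assembly kernel for this case; the caller falls back to the NEON kernels.
        }
        asm_gemm.prepare(); // One-off step, e.g. pre-transposing matrix B.
        asm_gemm.run();
        return true;
    }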
Diffstat (limited to 'arm_compute/runtime/NEON/functions')
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMM.h                               |   7
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h               | 120
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h               |   6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h |  20
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h         |   8
5 files changed, 136 insertions, 25 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index cf059e5c4d..523f1d33a1 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -32,10 +32,9 @@
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/NEON/AssemblyHelper.h"
-
#include <memory>
namespace arm_compute
@@ -86,12 +85,10 @@ private:
NEGEMMInterleave4x4Kernel _interleave_kernel;
NEGEMMTranspose1xWKernel _transpose_kernel;
NEGEMMMatrixMultiplyKernel _mm_kernel;
- AssemblyKernelGlueF32 _asm_glue;
+ NEGEMMAssemblyDispatchF32 _asm_glue;
NEGEMMMatrixAdditionKernel _ma_kernel;
Tensor _tmp_a;
Tensor _tmp_b;
- Tensor _workspace;
- Tensor _B_pretransposed;
const ITensor *_original_b;
bool _run_vector_matrix_multiplication;
bool _run_addition;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
new file mode 100644
index 0000000000..4ac6a3cae2
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__
+#define __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__
+
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"
+
+namespace arm_compute
+{
+/** Assembly kernel glue */
+template <typename TypeInput, typename TypeOutput>
+class NEGEMMAssemblyDispatch : public IFunction
+{
+public:
+ /** Default constructor */
+ NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+
+ /** Prevent instances of this class from being copy constructed */
+ NEGEMMAssemblyDispatch(const NEGEMMAssemblyDispatch<TypeInput, TypeOutput> &) = delete;
+ /** Prevent instances of this class from being copied */
+ NEGEMMAssemblyDispatch<TypeInput, TypeOutput> &operator=(const NEGEMMAssemblyDispatch<TypeInput, TypeOutput> &) = delete;
+ NEGEMMAssemblyDispatch(NEGEMMAssemblyDispatch<TypeInput, TypeOutput> &&) = default;
+ NEGEMMAssemblyDispatch<TypeInput, TypeOutput> &operator=(NEGEMMAssemblyDispatch<TypeInput, TypeOutput> &&) = default;
+ ~NEGEMMAssemblyDispatch() = default;
+
+private:
+ /** ACL Function */
+ std::unique_ptr<IFunction> _function;
+
+ //Fallback: use arm_gemm's AssemblyGemm:
+ class Fallback
+ {
+#ifndef DOXYGEN_SKIP_THIS
+ public:
+ /** Configures the array pointers and strides in the assembly kernel and executes the assembly kernel.
+ * The call to set_arrays is needed to deal with the input sizes containing batches (dims > 2)
+ */
+ void run();
+ void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, MemoryGroup &memory_group);
+ void prepare();
+ bool is_configured() const;
+#endif /* DOXYGEN_SKIP_THIS */
+
+ private:
+ /** Allocate a workspace tensor.
+ *
+ * @param[in] workspace_size Size to allocate.
+ * @param[in] memory_group Tensor memory group.
+ * @param[in] alignment Workspace memory alignment.
+ */
+ void allocate_workspace(size_t workspace_size, MemoryGroup *memory_group, size_t alignment);
+
+ /** Assembly Gemm kernel */
+ std::unique_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
+ /** Optimised NEON kernel */
+ std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
+ /** Input A */
+ const ITensor *_a
+ {
+ nullptr
+ };
+ /** Input B */
+ const ITensor *_b
+ {
+ nullptr
+ };
+ /** Output */
+ ITensor *_d{ nullptr };
+ /** GEMM workspace */
+ Tensor _workspace{};
+ /** Pre-transpose tensor */
+ Tensor _pretranspose{};
+ /** Prepared flag */
+ bool _is_prepared{ false };
+ } _arm_gemm; /**< Fallback in case ACL doesn't have a function */
+ MemoryGroup _memory_group; /**< Function memory group */
+public:
+ void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint);
+ bool is_configured() const;
+ // Inherited methods overridden:
+ /** Runs a preparation step, usually for pre-transposing matrix b */
+ void prepare() override;
+ void run() override;
+};
+
+/** Float 32 assembly kernel glue */
+using NEGEMMAssemblyDispatchF32 = NEGEMMAssemblyDispatch<float, float>;
+/** Uint 8 to Uint 32 kernel glue */
+using NEGEMMAssemblyDispatchU8U32 = NEGEMMAssemblyDispatch<uint8_t, uint32_t>;
+/** Int 8 to Int 32 kernel glue */
+using NEGEMMAssemblyDispatchS8S32 = NEGEMMAssemblyDispatch<int8_t, int32_t>;
+}
+#endif /* __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__ */
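Note on the design: the dispatcher holds both a std::unique_ptr<IFunction> _function (for when a dedicated ACL function exists, e.g. once the arm_gemm functions are forked as the commit message anticipates) and the arm_gemm-backed Fallback _arm_gemm. A plausible shape of the resulting dispatch, as a hedged sketch only (the actual bodies live in the corresponding .cpp, which is outside this directory's diff):

    template <typename TypeInput, typename TypeOutput>
    void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::run()
    {
        _memory_group.acquire(); // Assumption: workspace memory is acquired per run.
        if(_function != nullptr)
        {
            _function->run(); // A dedicated ACL function was selected at configure time.
        }
        else
        {
            _arm_gemm.run(); // Otherwise execute the arm_gemm-backed fallback.
        }
        _memory_group.release();
    }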
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index 68e1145e35..1564b6c983 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -37,8 +37,8 @@
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/AssemblyHelper.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
@@ -168,8 +168,8 @@ private:
void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
private:
- AssemblyKernelGlueF32 _asm_glue;
MemoryGroup _memory_group;
+ NEGEMMAssemblyDispatchF32 _asm_glue;
NEIm2ColKernel _input_im2col_kernel;
NEGEMMInterleave4x4Kernel _input_interleave_kernel;
NEConvolutionLayerReshapeWeights _reshape_weights;
@@ -187,8 +187,6 @@ private:
Tensor _weights_reshaped;
Tensor _gemm_output;
Tensor _tmp_output;
- Tensor _workspace;
- Tensor _B_pretransposed;
DataLayout _data_layout;
bool _append_bias;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 11ca1bc313..b6672d7584 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -29,7 +29,7 @@
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/AssemblyHelper.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"
#include <memory>
@@ -58,16 +58,14 @@ public:
void run() override;
private:
- MemoryGroup _memory_group;
- AssemblyKernelGlueU8U32 _asm_glue_unsigned;
- AssemblyKernelGlueS8S32 _asm_glue_signed;
- std::unique_ptr<INEKernel> _mm_kernel;
- std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
- std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
- Tensor _tmp_a;
- Tensor _tmp_b;
- Tensor _workspace;
- Tensor _B_pretransposed;
+ MemoryGroup _memory_group;
+ NEGEMMAssemblyDispatchU8U32 _asm_glue_unsigned;
+ NEGEMMAssemblyDispatchS8S32 _asm_glue_signed;
+ std::unique_ptr<INEKernel> _mm_kernel;
+ std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
+ std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
+ Tensor _tmp_a;
+ Tensor _tmp_b;
};
}
#endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index f32eb3c757..96ac7bb7e0 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -30,7 +30,7 @@
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/AssemblyHelper.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"
#include <memory>
@@ -98,8 +98,8 @@ public:
private:
MemoryGroup _memory_group;
- AssemblyKernelGlueU8U32 _asm_glue_unsigned;
- AssemblyKernelGlueS8S32 _asm_glue_signed;
+ NEGEMMAssemblyDispatchU8U32 _asm_glue_unsigned;
+ NEGEMMAssemblyDispatchS8S32 _asm_glue_signed;
std::unique_ptr<INEKernel> _mm_kernel;
std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
@@ -110,8 +110,6 @@ private:
Tensor _vector_sum_row;
Tensor _tmp_a;
Tensor _tmp_b;
- Tensor _workspace;
- Tensor _B_pretranspose;
const ITensor *_original_b;
int32_t _a_offset;
int32_t _b_offset;