aboutsummaryrefslogtreecommitdiff
path: root/arm_compute
diff options
context:
space:
mode:
authorAnthony Barbier <anthony.barbier@arm.com>2018-07-17 16:48:42 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:54 +0000
commitc8e84b5a3872eda6748d77dbaf8548ad99f4c0cd (patch)
tree0c519a97b7f0ff89352a7736be1cae43b6dea10e /arm_compute
parent3efb37536149f438a68a1742c35d827e1fbd7860 (diff)
downloadComputeLibrary-c8e84b5a3872eda6748d77dbaf8548ad99f4c0cd.tar.gz
COMPMID-1405: Create our own gemm_native kernel / function.
Change-Id: Ie0a80bd6b4eb5632cac63ccf54bcb07d4309da19 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140305 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Giorgio Arena <giorgio.arena@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--arm_compute/core/Error.h2
-rw-r--r--arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h106
-rw-r--r--arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h52
-rw-r--r--arm_compute/core/WindowIterator.h16
-rw-r--r--arm_compute/runtime/NEON/NEFunctions.h1
-rw-r--r--arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h37
-rw-r--r--arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h56
7 files changed, 262 insertions, 8 deletions
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index 9f51fa234c..f137be6ecb 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -157,7 +157,7 @@ Status create_error(ErrorCode error_code, const char *function, const char *file
*
* @param[in] ... Variables which are unused.
*/
-#define ARM_COMPUTE_UNUSED(...) arm_compute::ignore_unused(__VA_ARGS__) // NOLINT
+#define ARM_COMPUTE_UNUSED(...) ::arm_compute::ignore_unused(__VA_ARGS__) // NOLINT
/** Creates an error with a given message
*
diff --git a/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
new file mode 100644
index 0000000000..02e5b58c9d
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_INEGEMMWRAPPERKERNEL_H__
+#define __ARM_COMPUTE_INEGEMMWRAPPERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Common interface for all the arm_gemm Gemms
+ */
+class INEGEMMWrapperKernel : public INEKernel
+{
+public:
+ /** Parameters defining the dimensions of the matrices being multiplied */
+ struct Params
+ {
+ unsigned int M; /**< Rows in output matrix C (and input matrix A). */
+ unsigned int N; /**< Columns in output matrix C (and input matrix B). */
+ unsigned int K; /**< Columns of input matrix A (= rows of input matrix B). */
+ unsigned int batches; /**< Number of "batched" GEMMs (unique A and C, shared B). */
+ unsigned int multis; /**< Number of "multi" GEMMs (unique A, B and C). */
+ };
+
+ static Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *c);
+
+ /** Constructor */
+ INEGEMMWrapperKernel();
+ /** Prevent instances of this class from being copied */
+ INEGEMMWrapperKernel(const INEGEMMWrapperKernel &) = delete;
+ /** Prevent instances of this class from being copied */
+ INEGEMMWrapperKernel &operator=(const INEGEMMWrapperKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ INEGEMMWrapperKernel(INEGEMMWrapperKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ INEGEMMWrapperKernel &operator=(INEGEMMWrapperKernel &&) = default;
+ /** Initialise the kernel's input and output.
+ *
+ * @note The input and output tensor must have the same dimensions
+ *
+ * @param[in] a Input tensor (Matrix A)
+ * @param[in] b Input tensor (Matrix B)
+ * @param[out] c     Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
+ */
+ void configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+protected:
+ /** Called as part of configure() after _a, _b, _c and _params have been set.
+ *
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
+ *
+ * @return A 3D execution window.
+ */
+ virtual Window configure_internal(float alpha, float beta) = 0;
+
+ /** Run the kernel from the start to the end offset in window.
+ *
+ * @param[in] window Window to use for the iteration
+ * @param[in] start_offset Where to start iterating from (In Window coordinates)
+ * @param[in] end_offset Where to stop iterating (In Window coordinates).
+ * @param[in] info Info about executing thread and CPU.
+ */
+ virtual void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) = 0;
+
+ const ITensor *_a;
+ const ITensor *_b;
+ ITensor *_c;
+ Params _params;
+
+private:
+ Window _window3d;
+ TensorShape _window_shape;
+};
+
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_INEGEMMWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
new file mode 100644
index 0000000000..73a0d7f05f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
+
+#include "INEGEMMWrapperKernel.h"
+
+namespace arm_compute
+{
+/** Equivalent to arm_gemm::GemmNative but using Compute Library types.
+ */
+template <typename To, typename Tr>
+class NEGEMMNativeWrapperKernel : public INEGEMMWrapperKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEGEMMNativeWrapperKernel";
+ }
+
+protected:
+ // Inherited methods overridden:
+ Window configure_internal(float alpha, float beta) override;
+ void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) override;
+
+private:
+ Tr _beta{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/WindowIterator.h b/arm_compute/core/WindowIterator.h
index 13e9973506..13cb9cf2f4 100644
--- a/arm_compute/core/WindowIterator.h
+++ b/arm_compute/core/WindowIterator.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_WINDOW_ITERATOR_H__
#define __ARM_COMPUTE_WINDOW_ITERATOR_H__
#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Window.h"
@@ -71,9 +72,20 @@ public:
*/
inline size_t stride(size_t dim) const
{
+ ARM_COMPUTE_ERROR_ON(_strides[dim] % sizeof(T) != 0);
return _strides[dim] / sizeof(T);
}
+ /** Manually set the stride of a dimension
+ *
+ * @param[in] dim Dimension of the stride to set.
+ * @param[in] size Value to set the stride to (in bytes).
+ */
+ void set_stride(size_t dim, size_t size)
+ {
+ _strides[dim] = size;
+ }
+
/** Returns a pointer to the element at coordinates (x,y,z,w)
*
* @param[in] x X coordinates
@@ -99,8 +111,8 @@ public:
}
private:
- uint8_t *_first; /**< Pointer to the first element of the tensor.*/
- const Strides &_strides; /**< Strides in bytes of the tensor */
+ uint8_t *_first; /**< Pointer to the first element of the tensor.*/
+ Strides _strides; /**< Strides in bytes of the tensor */
};
/** Iterate over a portion of a Window */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 6a3fabca67..fdb1450179 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -104,6 +104,7 @@
#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
#include "arm_compute/runtime/NEON/functions/NEScale.h"
#include "arm_compute/runtime/NEON/functions/NEScharr3x3.h"
+#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
#include "arm_compute/runtime/NEON/functions/NESobel3x3.h"
#include "arm_compute/runtime/NEON/functions/NESobel5x5.h"
#include "arm_compute/runtime/NEON/functions/NESobel7x7.h"
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index 4ac6a3cae2..1c9ecb088e 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -54,6 +54,20 @@ private:
/** ACL Function */
std::unique_ptr<IFunction> _function;
+ /** If supported create the ACL function corresponding to the GemmMethod provided to process the other passed parameters
+ *
+ * @param[in] method GemmMethod to use to perform the matrix multiplication.
+ * @param[in] a Input tensor (Matrix A).
+ * @param[in] b Input tensor (Matrix B).
+ * @param[out] d      Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
+ * @param[in] pretranspose_hint Can the B tensor be pretransposed (i.e. shared across invocations)?
+ *
+ * @return True if the method is supported and the function was successfully created, false otherwise.
+ */
+ bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint);
+
//Fallback: use arm_gemm's AssemblyGemm:
class Fallback
{
@@ -63,7 +77,7 @@ private:
* The call to set_arrays is needed to deal with the input sizes containing batches (dims > 2)
*/
void run();
- void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, MemoryGroup &memory_group);
+ void configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<TypeOutput> &args, MemoryGroup &memory_group);
void prepare();
bool is_configured() const;
#endif /* DOXYGEN_SKIP_THIS */
@@ -102,7 +116,20 @@ private:
} _arm_gemm; /**< Fallback in case ACL doesn't have a function */
MemoryGroup _memory_group; /**< Function memory group */
public:
+ /** If supported create an ACL function else fallback to the arm_gemm function.
+ *
+ * @param[in] a Input tensor (Matrix A)
+ * @param[in] b Input tensor (Matrix B)
+ * @param[out] d                Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
+ * @param[in] pretranspose_hint Can the B tensor be pretransposed (i.e. shared across invocations)?
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint);
+ /** Was the function successfully configured?
+ *
+ * @return True if the function is configured and ready to run
+ */
bool is_configured() const;
// Inherited methods overridden:
/** Runs a preparation step, usually for pre-transposing matrix b */
@@ -110,11 +137,11 @@ public:
void run() override;
};
-/** Float 32 assembly kernel glue */
+/** Float 32 assembly dispatch kernel */
using NEGEMMAssemblyDispatchF32 = NEGEMMAssemblyDispatch<float, float>;
-/** Uint 8 to Uint 32 kernel glue */
+/** Uint 8 to Uint 32 assembly dispatch kernel */
using NEGEMMAssemblyDispatchU8U32 = NEGEMMAssemblyDispatch<uint8_t, uint32_t>;
-/** Int 8 to Int 32 kernel glue */
+/** Int 8 to Int 32 assembly dispatch kernel */
using NEGEMMAssemblyDispatchS8S32 = NEGEMMAssemblyDispatch<int8_t, int32_t>;
-}
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h b/arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h
new file mode 100644
index 0000000000..5a76c15627
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESIMPLEASSEMBLYFUNCTION_H__
+#define __ARM_COMPUTE_NESIMPLEASSEMBLYFUNCTION_H__
+
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+/** Basic interface for functions which have a single NEON GEMM wrapper kernel to run */
+class NESimpleAssemblyFunction : public IFunction
+{
+public:
+ /** Constructor */
+ NESimpleAssemblyFunction();
+
+ /** Configure the function with the kernel to run
+ *
+ * @param[in] kernel GEMM Wrapper kernel configured and ready to run
+ *
+ * @note The kernel is expected to have a 1D window. The function will multi-thread this window across the X dimension.
+ */
+ void configure(std::unique_ptr<INEGEMMWrapperKernel> kernel);
+
+ // Inherited methods overridden:
+ void run() override final;
+
+protected:
+ std::unique_ptr<INEGEMMWrapperKernel> _kernel; /**< Kernel to run */
+};
+} //namespace arm_compute
+#endif /*__ARM_COMPUTE_NESIMPLEASSEMBLYFUNCTION_H__ */