author     Anthony Barbier <anthony.barbier@arm.com>   2018-07-17 16:48:42 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>   2018-11-02 16:54:54 +0000
commit     c8e84b5a3872eda6748d77dbaf8548ad99f4c0cd (patch)
tree       0c519a97b7f0ff89352a7736be1cae43b6dea10e
parent     3efb37536149f438a68a1742c35d827e1fbd7860 (diff)
download   ComputeLibrary-c8e84b5a3872eda6748d77dbaf8548ad99f4c0cd.tar.gz
COMPMID-1405: Create our own gemm_native kernel / function.
Change-Id: Ie0a80bd6b4eb5632cac63ccf54bcb07d4309da19
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140305
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--  SConscript                                                          |   2
-rw-r--r--  arm_compute/core/Error.h                                            |   2
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h       | 106
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h  |  52
-rw-r--r--  arm_compute/core/WindowIterator.h                                   |  16
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                              |   1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h         |  37
-rw-r--r--  arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h       |  56
-rw-r--r--  src/core/NEON/kernels/assembly/INEGEMMWrapperKernel.cpp             |  80
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp        | 119
-rw-r--r--  src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp               | 114
-rw-r--r--  src/runtime/NEON/functions/NESimpleAssemblyFunction.cpp             |  46
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp                          |   2
13 files changed, 598 insertions(+), 35 deletions(-)
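In outline, the new dispatch flow introduced by this patch works as follows (summarised from the NEGEMMAssemblyDispatch.cpp changes below):

// 1. Extract M/N/K/batches/multis from the tensor shapes (INEGEMMWrapperKernel::extract_parameters).
// 2. Build an arm_gemm::GemmArgs and ask arm_gemm which GemmMethod it would pick for those arguments.
// 3. If an ACL wrapper exists for that method (currently only GEMM_NATIVE for F32 on aarch64),
//    create a NEGEMMNativeWrapperKernel wrapped in a NESimpleAssemblyFunction.
// 4. Otherwise fall back to the existing arm_gemm assembly glue (Fallback::configure).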
diff --git a/SConscript b/SConscript
index 983570b7dc..bfc3e78b70 100644
--- a/SConscript
+++ b/SConscript
@@ -190,6 +190,7 @@ if env['opencl']:
if env['neon']:
core_files += Glob('src/core/NEON/*.cpp')
core_files += Glob('src/core/NEON/kernels/*.cpp')
+ core_files += Glob('src/core/NEON/kernels/assembly/*.cpp')
core_files += Glob('src/core/NEON/kernels/arm_gemm/*.cpp')
@@ -209,6 +210,7 @@ if env['neon']:
runtime_files += Glob('src/runtime/NEON/*.cpp')
runtime_files += Glob('src/runtime/NEON/functions/*.cpp')
+ runtime_files += Glob('src/runtime/NEON/functions/assembly/*.cpp')
if env['gles_compute']:
if env['os'] != 'android':
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index 9f51fa234c..f137be6ecb 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -157,7 +157,7 @@ Status create_error(ErrorCode error_code, const char *function, const char *file
*
* @param[in] ... Variables which are unused.
*/
-#define ARM_COMPUTE_UNUSED(...) arm_compute::ignore_unused(__VA_ARGS__) // NOLINT
+#define ARM_COMPUTE_UNUSED(...) ::arm_compute::ignore_unused(__VA_ARGS__) // NOLINT
/** Creates an error with a given message
*
diff --git a/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
new file mode 100644
index 0000000000..02e5b58c9d
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_INEGEMMWRAPPERKERNEL_H__
+#define __ARM_COMPUTE_INEGEMMWRAPPERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Common interface for all the kernels which wrap an arm_gemm GEMM
+ */
+class INEGEMMWrapperKernel : public INEKernel
+{
+public:
+ /** Parameters defining the dimensions of the matrices being multiplied */
+ struct Params
+ {
+ unsigned int M; /**< Rows in output matrix C (and input matrix A). */
+ unsigned int N; /**< Columns in output matrix C (and input matrix B). */
+ unsigned int K; /**< Columns of input matrix A (= rows of input matrix B). */
+ unsigned int batches; /**< Number of "batched" GEMMs (unique A and C, shared B). */
+ unsigned int multis; /**< Number of "multi" GEMMs (unique A, B and C). */
+ };
+
+ /** Extract the matrix dimensions from the shapes of the given tensors */
+ static Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *c);
+
+ /** Constructor */
+ INEGEMMWrapperKernel();
+ /** Prevent instances of this class from being copied */
+ INEGEMMWrapperKernel(const INEGEMMWrapperKernel &) = delete;
+ /** Prevent instances of this class from being copied */
+ INEGEMMWrapperKernel &operator=(const INEGEMMWrapperKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ INEGEMMWrapperKernel(INEGEMMWrapperKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ INEGEMMWrapperKernel &operator=(INEGEMMWrapperKernel &&) = default;
+ /** Initialise the kernel's input and output.
+ *
+ * @note The shapes of @p a, @p b and @p c must describe a valid matrix multiplication (see Params)
+ *
+ * @param[in] a Input tensor (Matrix A)
+ * @param[in] b Input tensor (Matrix B)
+ * @param[out] c Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
+ */
+ void configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+protected:
+ /** Called as part of configure() after _a, _b, _c and _params have been set.
+ *
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
+ *
+ * @return A 3D execution window.
+ */
+ virtual Window configure_internal(float alpha, float beta) = 0;
+
+ /** Run the kernel from the start to the end offset in window.
+ *
+ * @param[in] window Window to use for the iteration
+ * @param[in] start_offset Where to start iterating from (in Window coordinates).
+ * @param[in] end_offset Where to stop iterating (in Window coordinates).
+ * @param[in] info Info about executing thread and CPU.
+ */
+ virtual void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) = 0;
+
+ const ITensor *_a;
+ const ITensor *_b;
+ ITensor *_c;
+ Params _params;
+
+private:
+ Window _window3d;
+ TensorShape _window_shape;
+};
+
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_INEGEMMWRAPPERKERNEL_H__ */
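To illustrate the contract this interface defines, here is a hypothetical minimal subclass (a sketch only, names invented; the real example is NEGEMMNativeWrapperKernel below): configure_internal() returns the true 3D window, the base class collapses it to 1D for the scheduler, and run_internal() receives the recovered 3D start/end coordinates.

#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"

namespace arm_compute
{
class MyGEMMWrapperKernel : public INEGEMMWrapperKernel
{
public:
    const char *name() const override
    {
        return "MyGEMMWrapperKernel";
    }

protected:
    Window configure_internal(float alpha, float beta) override
    {
        ARM_COMPUTE_UNUSED(alpha, beta);
        Window win;
        win.set(Window::DimX, Window::Dimension(0, _params.M)); // one iteration per row of C
        win.set(Window::DimY, Window::Dimension(0, _params.batches));
        win.set(Window::DimZ, Window::Dimension(0, _params.multis));
        return win;
    }

    void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) override
    {
        ARM_COMPUTE_UNUSED(window, info);
        // Process only the [start_offset, end_offset] sub-range of the 3D window here.
        ARM_COMPUTE_UNUSED(start_offset, end_offset);
    }
};
} // namespace arm_compute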
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
new file mode 100644
index 0000000000..73a0d7f05f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
+
+#include "INEGEMMWrapperKernel.h"
+
+namespace arm_compute
+{
+/** Equivalent to arm_gemm::GemmNative but using Compute Library types.
+ */
+template <typename To, typename Tr>
+class NEGEMMNativeWrapperKernel : public INEGEMMWrapperKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEGEMMNativeWrapperKernel";
+ }
+
+protected:
+ // Inherited methods overridden:
+ Window configure_internal(float alpha, float beta) override;
+ void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) override;
+
+private:
+ Tr _beta{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__ */
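For reference, this is how the kernel is instantiated and wired further down in this patch (NEGEMMAssemblyDispatch::create_function in NEGEMMAssemblyDispatch.cpp):

auto kernel = support::cpp14::make_unique<NEGEMMNativeWrapperKernel<float, float>>();
kernel->configure(a, b, d, alpha, beta);
auto function = support::cpp14::make_unique<NESimpleAssemblyFunction>();
function->configure(std::move(kernel));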
diff --git a/arm_compute/core/WindowIterator.h b/arm_compute/core/WindowIterator.h
index 13e9973506..13cb9cf2f4 100644
--- a/arm_compute/core/WindowIterator.h
+++ b/arm_compute/core/WindowIterator.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_WINDOW_ITERATOR_H__
#define __ARM_COMPUTE_WINDOW_ITERATOR_H__
#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Window.h"
@@ -71,9 +72,20 @@ public:
*/
inline size_t stride(size_t dim) const
{
+ ARM_COMPUTE_ERROR_ON(_strides[dim] % sizeof(T) != 0);
return _strides[dim] / sizeof(T);
}
+ /** Manually set the stride of a dimension
+ *
+ * @param[in] dim Dimension of the stride to set.
+ * @param[in] size Value to set the stride to (in bytes).
+ */
+ void set_stride(size_t dim, size_t size)
+ {
+ _strides[dim] = size;
+ }
+
/** Returns a pointer to the element at coordinates (x,y,z,w)
*
* @param[in] x X coordinates
@@ -99,8 +111,8 @@ public:
}
private:
- uint8_t *_first; /**< Pointer to the first element of the tensor.*/
- const Strides &_strides; /**< Strides in bytes of the tensor */
+ uint8_t *_first; /**< Pointer to the first element of the tensor.*/
+ Strides _strides; /**< Strides in bytes of the tensor */
};
/** Iterate over a portion of a Window */
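The switch from a const Strides reference to an owned Strides copy is what makes the new set_stride() safe: overriding a stride only affects the accessor, never the tensor's own metadata. The native GEMM wrapper later in this patch relies on exactly this to reinterpret the batch stride of NHWC inputs:

TensorAccessor<float> a(*_a);
if(_a->info()->data_layout() == DataLayout::NHWC)
{
    // Make batches advance by whole output rows; the tensor's stored strides stay untouched.
    const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _c->info()->dimension(1);
    a.set_stride(2, nhwc_batch_stride);
}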
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 6a3fabca67..fdb1450179 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -104,6 +104,7 @@
#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
#include "arm_compute/runtime/NEON/functions/NEScale.h"
#include "arm_compute/runtime/NEON/functions/NEScharr3x3.h"
+#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
#include "arm_compute/runtime/NEON/functions/NESobel3x3.h"
#include "arm_compute/runtime/NEON/functions/NESobel5x5.h"
#include "arm_compute/runtime/NEON/functions/NESobel7x7.h"
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index 4ac6a3cae2..1c9ecb088e 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -54,6 +54,20 @@ private:
/** ACL Function */
std::unique_ptr<IFunction> _function;
+ /** If supported, create the ACL function corresponding to the given GemmMethod and configure it with the other parameters
+ *
+ * @param[in] method GemmMethod to use to perform the matrix multiplication.
+ * @param[in] a Input tensor (Matrix A).
+ * @param[in] b Input tensor (Matrix B).
+ * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to the AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to the input D matrix before adding the product.
+ * @param[in] pretranspose_hint Can the B tensor be pretransposed (i.e. shared across invocations)?
+ *
+ * @return True if the method is supported and the function was successfully created, false otherwise.
+ */
+ bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint);
+
//Fallback: use arm_gemm's AssemblyGemm:
class Fallback
{
@@ -63,7 +77,7 @@ private:
* The call to set_arrays is needed to deal with the input sizes containing batches (dims > 2)
*/
void run();
- void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, MemoryGroup &memory_group);
+ void configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<TypeOutput> &args, MemoryGroup &memory_group);
void prepare();
bool is_configured() const;
#endif /* DOXYGEN_SKIP_THIS */
@@ -102,7 +116,20 @@ private:
} _arm_gemm; /**< Fallback in case ACL doesn't have a function */
MemoryGroup _memory_group; /**< Function memory group */
public:
+ /** If supported, create an ACL function; otherwise fall back to the arm_gemm function.
+ *
+ * @param[in] a Input tensor (Matrix A)
+ * @param[in] b Input tensor (Matrix B)
+ * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to the AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to the input D matrix before adding the product.
+ * @param[in] pretranspose_hint Can the B tensor be pretransposed (i.e. shared across invocations)?
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint);
+ /** Was the function successfully configured?
+ *
+ * @return True if the function is configured and ready to run
+ */
bool is_configured() const;
// Inherited methods overridden:
/** Runs a preparation step, usually for pre-transposing matrix b */
@@ -110,11 +137,11 @@ public:
void run() override;
};
-/** Float 32 assembly kernel glue */
+/** Float 32 assembly dispatch kernel */
using NEGEMMAssemblyDispatchF32 = NEGEMMAssemblyDispatch<float, float>;
-/** Uint 8 to Uint 32 kernel glue */
+/** Uint 8 to Uint 32 assembly dispatch kernel */
using NEGEMMAssemblyDispatchU8U32 = NEGEMMAssemblyDispatch<uint8_t, uint32_t>;
-/** Int 8 to Int 32 kernel glue */
+/** Int 8 to Int 32 assembly dispatch kernel */
using NEGEMMAssemblyDispatchS8S32 = NEGEMMAssemblyDispatch<int8_t, int32_t>;
-}
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__ */
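A hypothetical end-to-end usage sketch of the F32 dispatch (shapes invented; assumes the constructor's memory-manager argument is defaulted and recalls that ACL tensor shapes are ordered x = columns, y = rows):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_sgemm_example()
{
    Tensor a, b, d; // A: M=32 x K=64, B: K=64 x N=48, D: M=32 x N=48
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(48U, 64U), 1, DataType::F32));
    d.allocator()->init(TensorInfo(TensorShape(48U, 32U), 1, DataType::F32));

    NEGEMMAssemblyDispatchF32 gemm;
    gemm.configure(&a, &b, &d, 1.0f, 0.0f, false /* pretranspose_hint */);

    a.allocator()->allocate();
    b.allocator()->allocate();
    d.allocator()->allocate();
    // ... fill a and b ...

    if(gemm.is_configured())
    {
        gemm.prepare(); // may pre-transpose B
        gemm.run();     // D = alpha * A * B + beta * D
    }
}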
diff --git a/arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h b/arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h
new file mode 100644
index 0000000000..5a76c15627
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESIMPLEASSEMBLYFUNCTION_H__
+#define __ARM_COMPUTE_NESIMPLEASSEMBLYFUNCTION_H__
+
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+/** Basic interface for functions which have a single NEON GEMM wrapper kernel to run */
+class NESimpleAssemblyFunction : public IFunction
+{
+public:
+ /** Constructor */
+ NESimpleAssemblyFunction();
+
+ /** Configure the function with the kernel to run
+ *
+ * @param[in] kernel GEMM Wrapper kernel configured and ready to run
+ *
+ * @note The kernel is expected to have a 1D window. The function will multi-thread this window across the X dimension.
+ */
+ void configure(std::unique_ptr<INEGEMMWrapperKernel> kernel);
+
+ // Inherited methods overridden:
+ void run() override final;
+
+protected:
+ std::unique_ptr<INEGEMMWrapperKernel> _kernel; /**< Kernel to run */
+};
+} //namespace arm_compute
+#endif /* __ARM_COMPUTE_NESIMPLEASSEMBLYFUNCTION_H__ */
diff --git a/src/core/NEON/kernels/assembly/INEGEMMWrapperKernel.cpp b/src/core/NEON/kernels/assembly/INEGEMMWrapperKernel.cpp
new file mode 100644
index 0000000000..0fc3610014
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/INEGEMMWrapperKernel.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/WindowIterator.h"
+
+using namespace arm_compute;
+
+INEGEMMWrapperKernel::INEGEMMWrapperKernel()
+ : _a(nullptr), _b(nullptr), _c(nullptr), _params(), _window3d(), _window_shape()
+{
+}
+
+INEGEMMWrapperKernel::Params INEGEMMWrapperKernel::extract_parameters(const ITensor *a, const ITensor *b, const ITensor *c)
+{
+ Params p;
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(a);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(b);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(c);
+
+ p.M = c->info()->tensor_shape().y();
+ p.N = c->info()->tensor_shape().x();
+ p.K = a->info()->tensor_shape().x();
+ p.multis = b->info()->tensor_shape().z();
+ p.batches = c->info()->tensor_shape().total_size_upper(2) / p.multis; //COMPMID-1423: Agree on and document the layout of gemm inputs/outputs
+
+ return p;
+}
+
+void INEGEMMWrapperKernel::configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta)
+{
+ _params = extract_parameters(a, b, c);
+ _a = a;
+ _b = b;
+ _c = c;
+
+ _window3d = configure_internal(alpha, beta);
+ _window_shape = _window3d.shape();
+
+ // Convert the 3D window into a 1D window in order to allow the scheduler to split it arbitrarily.
+ Window collapsed;
+ collapsed.set(0, Window::Dimension(0, _window3d.num_iterations_total()));
+
+ INEKernel::configure(collapsed);
+}
+
+void INEGEMMWrapperKernel::run(const Window &window, const ThreadInfo &info)
+{
+ const Coordinates start_offset = index2coords(_window_shape, window.x().start());
+ const Coordinates end_offset = index2coords(_window_shape, window.x().end() - 1);
+
+ run_internal(_window3d, start_offset, end_offset, info);
+}
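A worked example of the collapse performed above (a sketch, assuming index2coords() unravels the flat index with the x dimension fastest):

// 3D window with num_iterations (4, 2, 3) -> num_iterations_total() == 24,
// so the scheduler sees a 1D window [0, 24) that it can split arbitrarily.
// A thread handed the 1D slice [6, 12) maps it back as:
//   index2coords({4, 2, 3},  6) == (2, 1, 0)  // start_offset
//   index2coords({4, 2, 3}, 11) == (3, 0, 1)  // end_offset (last index = end - 1)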
diff --git a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
new file mode 100644
index 0000000000..38eb40a06e
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/WindowIterator.h"
+
+#include "../arm_gemm/utils.hpp"
+#include "arm_gemm.hpp"
+
+#include "../arm_gemm/mergeresults.hpp"
+#include "../arm_gemm/transform.hpp"
+
+#include "../arm_gemm/kernels/a32_sgemm_8x6.hpp"
+#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp"
+#include "../arm_gemm/kernels/a64_sgemm_native_16x4.hpp"
+#include "../arm_gemm/kernels/a64_sgemv_pretransposed.hpp"
+#include "../arm_gemm/kernels/a64_sgemv_trans.hpp"
+
+namespace arm_compute
+{
+template <typename To, typename Tr>
+struct Kernel
+{
+};
+
+#ifdef __aarch64__
+template <>
+struct Kernel<float, float>
+{
+ using strategy = arm_gemm::sgemm_native_16x4;
+};
+#endif /* __aarch64__ */
+
+template <typename To, typename Tr>
+Window NEGEMMNativeWrapperKernel<To, Tr>::configure_internal(float alpha, float beta)
+{
+ using strategy = typename Kernel<To, Tr>::strategy;
+
+ _beta = beta;
+
+ //Note: The window is shifted down by 1 dimension compared to the tensors
+ Window window;
+ window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_params.M, strategy::out_height()), strategy::out_height()));
+ window.set(Window::DimY, Window::Dimension(0, _params.batches));
+ window.set(Window::DimZ, Window::Dimension(0, _params.multis));
+
+ return window;
+}
+
+template <typename To, typename Tr>
+void NEGEMMNativeWrapperKernel<To, Tr>::run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info)
+{
+ using strategy = typename Kernel<To, Tr>::strategy;
+
+ TensorAccessor<To> a(*_a);
+ TensorAccessor<To> b(*_b);
+ TensorAccessor<Tr> c(*_c);
+
+ if(_a->info()->data_layout() == DataLayout::NHWC)
+ {
+ // In the case of NHWC we want to interpret the output shape as 3D. Thus, the batch stride for A is
+ // the relevant multiple of the row stride.
+ const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _c->info()->dimension(1);
+ a.set_stride(2, nhwc_batch_stride);
+ }
+
+ unsigned int m_end = 0;
+
+ strategy strat(info.cpu_info);
+ auto window_iterator = arm_compute::create_window_iterator(window, start_offset, end_offset, [&](const Coordinates & id)
+ {
+ const unsigned int y0 = id.x();
+ const unsigned int batch = id.y();
+ const unsigned int multi = id.z();
+ const unsigned int ymax = std::min(y0 + strategy::out_height(), m_end);
+
+ strat.kernel(a(0, y0, batch, multi), a.stride(Window::DimY),
+ b(0, 0, 0, multi), b.stride(Window::DimY),
+ c(0, y0, batch, multi), c.stride(Window::DimY),
+ _beta, (ymax - y0), _params.N, _params.K);
+ });
+
+ auto on_new_row_size = [&](unsigned int start, unsigned int end)
+ {
+ m_end = std::min(end, _params.M);
+ };
+
+ window_iterator.iterate_3D(on_new_row_size);
+}
+
+#ifdef __aarch64__
+template class NEGEMMNativeWrapperKernel<float, float>;
+#endif /* __aarch64__ */
+
+} // namespace arm_compute
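To make the interplay between the DimX step and m_end concrete (sizes invented):

// M = 10 rows, strategy::out_height() = 4:
//   configure_internal() sets DimX to [0, ceil_to_multiple(10, 4)) = [0, 12), step 4,
//   so y0 takes the values 0, 4 and 8.
//   on_new_row_size() clamps m_end = min(12, 10) = 10, hence the final block
//   processes ymax - y0 = 10 - 8 = 2 rows instead of a full tile of 4.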
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index e796a6a56e..f4710fab84 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -23,21 +23,74 @@
*/
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
template <typename TypeInput, typename TypeOutput>
NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
: _function(nullptr), _arm_gemm(), _memory_group(std::move(memory_manager))
{
}
+template <>
+bool NEGEMMAssemblyDispatch<float, float>::create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
+{
+ ARM_COMPUTE_UNUSED(method);
+ ARM_COMPUTE_UNUSED(a);
+ ARM_COMPUTE_UNUSED(b);
+ ARM_COMPUTE_UNUSED(d);
+ ARM_COMPUTE_UNUSED(alpha);
+ ARM_COMPUTE_UNUSED(beta);
+ ARM_COMPUTE_UNUSED(pretranspose_hint);
+ switch(method)
+ {
+#ifdef __aarch64__
+ case arm_gemm::GemmMethod::GEMM_NATIVE:
+ {
+ auto kernel = support::cpp14::make_unique<NEGEMMNativeWrapperKernel<float, float>>();
+ kernel->configure(a, b, d, alpha, beta);
+ auto function = support::cpp14::make_unique<NESimpleAssemblyFunction>();
+ function->configure(std::move(kernel));
+ _function = std::move(function);
+ return true;
+ }
+#endif /* __aarch64__ */
+ default:
+ return false;
+ }
+}
+
+template <typename TypeInput, typename TypeOutput>
+bool NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
+{
+ ARM_COMPUTE_UNUSED(method);
+ ARM_COMPUTE_UNUSED(a);
+ ARM_COMPUTE_UNUSED(b);
+ ARM_COMPUTE_UNUSED(d);
+ ARM_COMPUTE_UNUSED(alpha);
+ ARM_COMPUTE_UNUSED(beta);
+ ARM_COMPUTE_UNUSED(pretranspose_hint);
+ return false;
+}
+
template <typename TypeInput, typename TypeOutput>
void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
{
- //TODO(antbar01) Check heuristics here to figure out if we should use an ACL IFunction
- _arm_gemm.configure(a, b, d, alpha, beta, pretranspose_hint, _memory_group);
+ INEGEMMWrapperKernel::Params p = INEGEMMWrapperKernel::extract_parameters(a, b, d);
+ const CPUInfo &ci = NEScheduler::get().cpu_info();
+ unsigned int num_threads = NEScheduler::get().num_threads();
+
+ arm_gemm::GemmArgs<TypeOutput> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint);
+
+ //Try to create an ACL function:
+ if(!create_function(arm_gemm::get_gemm_method<TypeInput, TypeOutput>(args), a, b, d, alpha, beta, pretranspose_hint))
+ {
+ //Fall back to the arm_gemm function if ACL doesn't support this method.
+ _arm_gemm.configure(a, b, d, args, _memory_group);
+ }
}
template <typename TypeInput, typename TypeOutput>
@@ -75,10 +128,8 @@ void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::run()
}
#ifndef __aarch64__
-namespace arm_compute
-{
template <>
-void NEGEMMAssemblyDispatch<uint8_t, uint32_t>::Fallback::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, MemoryGroup &memory_group)
+void NEGEMMAssemblyDispatch<uint8_t, uint32_t>::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
{
// arm_gemm::gemm for 8bit only exists for aarch64
ARM_COMPUTE_UNUSED(a);
@@ -87,11 +138,11 @@ void NEGEMMAssemblyDispatch<uint8_t, uint32_t>::Fallback::configure(const ITenso
ARM_COMPUTE_UNUSED(alpha);
ARM_COMPUTE_UNUSED(beta);
ARM_COMPUTE_UNUSED(pretranspose_hint);
- ARM_COMPUTE_UNUSED(memory_group);
+ ARM_COMPUTE_ERROR("Not supported for this architecture");
}
template <>
-void NEGEMMAssemblyDispatch<int8_t, int32_t>::Fallback::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, MemoryGroup &memory_group)
+void NEGEMMAssemblyDispatch<int8_t, int32_t>::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
{
// arm_gemm::gemm for 8bit only exists for aarch64
ARM_COMPUTE_UNUSED(a);
@@ -100,23 +151,37 @@ void NEGEMMAssemblyDispatch<int8_t, int32_t>::Fallback::configure(const ITensor
ARM_COMPUTE_UNUSED(alpha);
ARM_COMPUTE_UNUSED(beta);
ARM_COMPUTE_UNUSED(pretranspose_hint);
+ ARM_COMPUTE_ERROR("Not supported for this architecture");
+}
+
+template <>
+void NEGEMMAssemblyDispatch<uint8_t, uint32_t>::Fallback::configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<uint32_t> &args, MemoryGroup &memory_group)
+{
+ // arm_gemm::gemm for 8bit only exists for aarch64
+ ARM_COMPUTE_UNUSED(a);
+ ARM_COMPUTE_UNUSED(b);
+ ARM_COMPUTE_UNUSED(d);
+ ARM_COMPUTE_UNUSED(args);
ARM_COMPUTE_UNUSED(memory_group);
+ ARM_COMPUTE_ERROR("Not supported for this architecture");
}
-} //namespace arm_compute
+template <>
+void NEGEMMAssemblyDispatch<int8_t, int32_t>::Fallback::configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<int32_t> &args, MemoryGroup &memory_group)
+{
+ // arm_gemm::gemm for 8bit only exists for aarch64
+ ARM_COMPUTE_UNUSED(a);
+ ARM_COMPUTE_UNUSED(b);
+ ARM_COMPUTE_UNUSED(d);
+ ARM_COMPUTE_UNUSED(args);
+ ARM_COMPUTE_UNUSED(memory_group);
+ ARM_COMPUTE_ERROR("Not supported for this architecture");
+}
#endif // aarch64
template <typename TypeInput, typename TypeOutput>
-void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::Fallback::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, MemoryGroup &memory_group)
+void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::Fallback::configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<TypeOutput> &args, MemoryGroup &memory_group)
{
- const CPUInfo &ci = NEScheduler::get().cpu_info();
- const int M = d->info()->tensor_shape().y();
- const int N = d->info()->tensor_shape().x();
- const int K = a->info()->tensor_shape().x();
- const int batches = d->info()->tensor_shape().total_size_upper(2);
- const int multis = b->info()->tensor_shape().z();
- unsigned int num_threads = NEScheduler::get().num_threads();
-
- _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput>(ci, M, N, K, batches, multis, false, false, alpha, beta, num_threads, pretranspose_hint);
+ _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput>(args, nullptr);
if(_gemm_kernel_asm == nullptr)
{
//configuration not supported: Leave function unconfigured:
@@ -139,11 +204,10 @@ void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::Fallback::configure(const IT
//if we disable this code below in brackets then ConvLayer deadlocks when threads > 1 and
//the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
{
- const unsigned int window_size = _gemm_kernel_asm->get_window_size();
- if(window_size < num_threads)
+ const int window_size = _gemm_kernel_asm->get_window_size();
+ if(window_size < args._maxthreads)
{
- num_threads = window_size;
- _gemm_kernel_asm->set_nthreads(num_threads);
+ _gemm_kernel_asm->set_nthreads(window_size);
}
}
@@ -248,8 +312,6 @@ void NEGEMMAssemblyDispatch<TypeInput, TypeOutput>::Fallback::run()
NEScheduler::get().schedule(_optimised_kernel.get(), Window::DimX);
}
-namespace arm_compute
-{
template class NEGEMMAssemblyDispatch<float, float>;
template class NEGEMMAssemblyDispatch<uint8_t, uint32_t>;
template class NEGEMMAssemblyDispatch<int8_t, int32_t>;
diff --git a/src/runtime/NEON/functions/NESimpleAssemblyFunction.cpp b/src/runtime/NEON/functions/NESimpleAssemblyFunction.cpp
new file mode 100644
index 0000000000..a4b0dfffaa
--- /dev/null
+++ b/src/runtime/NEON/functions/NESimpleAssemblyFunction.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
+
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+using namespace arm_compute;
+
+NESimpleAssemblyFunction::NESimpleAssemblyFunction() // NOLINT
+ : _kernel()
+{
+}
+
+void NESimpleAssemblyFunction::run()
+{
+ NEScheduler::get().schedule(_kernel.get(), Window::DimX);
+}
+
+void NESimpleAssemblyFunction::configure(std::unique_ptr<INEGEMMWrapperKernel> kernel)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(kernel.get());
+ _kernel = std::move(kernel);
+ ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(_kernel->window(), 1);
+}
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 1d82ff0712..ed8c64ebb4 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -194,7 +194,7 @@ input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type,
}
template <typename T>
-using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
+using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC