aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/core/NEON/kernels/assembly
diff options
context:
space:
mode:
authorDavid Mansell <David.Mansell@arm.com>2018-07-06 17:53:35 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:10 +0000
commite39334c15c7fd141bb8173d5017ea5ca157fca2c (patch)
treefffa2f7b136525037c4d99586bc194374e5bd3dc /arm_compute/core/NEON/kernels/assembly
parente8bd2c729546e59aa0adc241976ea91fc6f25b52 (diff)
downloadComputeLibrary-e39334c15c7fd141bb8173d5017ea5ca157fca2c.tar.gz
COMPMID-1271: New system for GEMM heuristics
This patch implements a system for separating the "validity" from "preferred" aspect of the current heuristics in gemm_*.cpp. Now, each gemm_*.cpp defines a list of candidate implementations, each of which supplies an is_valid() function (to check for validity), an is_preferred() function (the "heuristic" part), and an instantiate() function which actually produces the GemmCommon object pointer. The actual gemm() function is now templated and uses this list to select an implementation. This patch also implements a mechanism to identify the preferred implementation, and override it via the GemmConfig structure. Change-Id: Id49ab7af8bf2e3e9fd951a9698883ade234d40e1 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/139120 Reviewed-by: Anthony Barbier <anthony.barbier@arm.com> Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels/assembly')
-rw-r--r--arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp99
1 files changed, 98 insertions, 1 deletions
diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 8d1433dd24..162cbc5c46 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -30,9 +30,100 @@
namespace arm_gemm {
+enum class GemmMethod
+{
+ DEFAULT,
+ GEMV_BATCHED,
+ GEMV_PRETRANSPOSED,
+ GEMV_NATIVE_TRANSPOSED,
+ GEMM_NATIVE,
+ GEMM_INTERLEAVED,
+ GEMM_INTERLEAVED_FP16,
+ GEMM_INTERLEAVED_DOT
+};
+
+struct GemmConfig
+{
+ GemmMethod method = GemmMethod::DEFAULT;
+ unsigned int inner_block_size = 0;
+ unsigned int outer_block_size = 0;
+
+ GemmConfig(GemmMethod method) : method(method) { }
+};
+
+template<typename T>
+struct GemmArgs
+{
+public:
+ const CPUInfo *_ci;
+ unsigned int _Msize;
+ unsigned int _Nsize;
+ unsigned int _Ksize;
+ unsigned int _nbatches;
+ unsigned int _nmulti;
+ bool _trA;
+ bool _trB;
+ T _alpha;
+ T _beta;
+ int _maxthreads;
+ bool _pretransposed_hint;
+
+ GemmArgs(const CPUInfo *ci, const unsigned int M, const unsigned int N,
+ const unsigned int K, const unsigned int nbatches,
+ const unsigned int nmulti, const bool trA, const bool trB,
+ const T alpha, const T beta, const int maxthreads,
+ const bool pretransposed_hint) :
+ _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
+ _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
+ _pretransposed_hint(pretransposed_hint)
+ {
+ }
+};
+
template<typename Top, typename Tret>
using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
+/* Low level API calls.
+ * These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
+
+/* method_is_compatible(): Can a GEMM of the templated types with the
+ * provided parameters be provided using the supplied method? */
+
+template<typename Top, typename Tret>
+bool method_is_compatible(GemmMethod method, GemmArgs<Tret> &args);
+
+template<typename Top, typename Tret>
+bool method_is_compatible(GemmMethod method, const CPUInfo &ci,
+ const unsigned int M, const unsigned int N, const unsigned int K,
+ const unsigned int nbatches, const unsigned int nmulti,
+ const bool trA, const bool trB, const Tret alpha, const Tret beta,
+ const int maxthreads, const bool pretransposed_hint)
+{
+ GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
+
+ return method_is_compatible<Top, Tret>(method, args);
+}
+
+/* get_gemm_method(): Given the templated types and provided parameters,
+ * which is the preferred method to implement this GEMM? */
+template<typename Top, typename Tret>
+GemmMethod get_gemm_method(GemmArgs<Tret> &args);
+
+template<typename Top, typename Tret>
+GemmMethod get_gemm_method(const CPUInfo &ci,
+ const unsigned int M, const unsigned int N, const unsigned int K,
+ const unsigned int nbatches, const unsigned int nmulti,
+ const bool trA, const bool trB, const Tret alpha, const Tret beta,
+ const int maxthreads, const bool pretransposed_hint)
+{
+ GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
+
+ return get_gemm_method<Top, Tret>(args);
+}
+
+template<typename Top, typename Tret>
+UniqueGemmCommon<Top, Tret> gemm(GemmArgs<Tret> &args, GemmConfig *cfg);
+
/** Request an object to process a GEMM.
*
* @param[in] ci Describes CPU properties.
@@ -47,12 +138,18 @@ using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
* @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
* @param[in] maxthreads Maximum (and default) number of threads that will call execute method.
 * @param[in] pretransposed_hint Can the B tensor be pretransposed (i.e. shared across invocations)?
+ * @param[in] cfg (optional) configuration parameters
*/
template<typename Top, typename Tret>
UniqueGemmCommon<Top, Tret> gemm(const CPUInfo &ci,
const unsigned int M, const unsigned int N, const unsigned int K,
const unsigned int nbatches, const unsigned int nmulti,
const bool trA, const bool trB, const Tret alpha, const Tret beta,
- const int maxthreads, const bool pretransposed_hint);
+ const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
+{
+ GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
+
+ return gemm<Top, Tret>(args, cfg);
+}
} // namespace arm_gemm