author    Georgios Pinitas <georgios.pinitas@arm.com> 2019-06-27 17:00:52 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com> 2019-07-26 11:55:15 +0000
commit    cfa2bba98169cb5ab1945462514be1b6badf7d98 (patch)
tree      1635e6e9463e9798c7195f0aa71b5df3f2650df1 /arm_compute/core
parent    f59b16f42ef68bde877b70816ffb953d64c8baa3 (diff)
COMPMID-2178: Update GEMM assembly code.
Perform offset reduction and requantization within the assembly wrapper.

Change-Id: I5d5b3e1f6f9ef4c71805362c57f88ff199c027a3
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1541
Comments-Addressed: Pablo Marquez <pablo.tello@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
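For background, "offset reduction" refers to folding the quantization zero points of A and B into the raw int32 accumulators via their row and column sums; requantization then scales the corrected accumulator back to the 8-bit output range. Below is a minimal scalar sketch of both steps: the field names mirror the ARequantizeLayer32 struct introduced in this diff, but the rounding behaviour is an illustrative assumption, not the kernels' exact arithmetic.

#include <algorithm>
#include <cstdint>

// Offset reduction: (A[i][k]-a_off)*(B[k][j]-b_off) summed over k expands to
//   sum(A*B) - b_off*rowsum(A) - a_off*colsum(B) + K*a_off*b_off,
// so a plain integer GEMM can be corrected after the fact.
int32_t offset_reduce(const uint8_t *a_row, const uint8_t *b_col, int K,
                      int32_t a_offset, int32_t b_offset)
{
    int32_t acc = 0, a_sum = 0, b_sum = 0;
    for (int k = 0; k < K; k++) {
        acc   += int32_t(a_row[k]) * int32_t(b_col[k]); // raw uint8 x uint8 product
        a_sum += a_row[k];
        b_sum += b_col[k];
    }
    return acc - b_offset * a_sum - a_offset * b_sum + K * a_offset * b_offset;
}

// Requantization: scale the corrected accumulator back to the output range.
// Simplified truncating shift; the assembly kernels' rounding is not shown.
uint8_t requantize(int32_t acc, int32_t bias, int32_t requant_mul, int32_t requant_shift,
                   int32_t c_offset, int32_t minval, int32_t maxval)
{
    int64_t scaled = (int64_t(acc + bias) * requant_mul) >> requant_shift;
    int32_t out    = int32_t(scaled) + c_offset; // re-centre on the output zero point
    return uint8_t(std::clamp(out, minval, maxval)); // saturate to the quantized range
}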
Diffstat (limited to 'arm_compute/core')
-rw-r--r-- arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp    | 116
-rw-r--r-- arm_compute/core/NEON/kernels/assembly/gemm_common.hpp |  10
2 files changed, 43 insertions(+), 83 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 1b511ba79a..828b0f20a7 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -39,7 +39,9 @@ enum class GemmMethod
GEMV_NATIVE_TRANSPOSED,
GEMM_NATIVE,
GEMM_HYBRID,
- GEMM_INTERLEAVED
+ GEMM_INTERLEAVED,
+ QUANTIZE_WRAPPER,
+ GEMM_HYBRID_QUANTIZED
};
struct KernelDescription
@@ -86,98 +88,52 @@ public:
const unsigned int nmulti, const bool trA, const bool trB,
const T alpha, const T beta, const int maxthreads,
const bool pretransposed_hint, const GemmConfig *cfg=nullptr ) :
- _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
- _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
- _pretransposed_hint(pretransposed_hint), _cfg(cfg)
+ _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
+ _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
+ _pretransposed_hint(pretransposed_hint), _cfg(cfg)
{
}
};
+struct ARequantizeLayer32
+{
+public:
+ const int32_t *bias;
+ int32_t a_offset;
+ int32_t b_offset;
+ int32_t c_offset;
+ int32_t requant_shift;
+ int32_t requant_mul;
+ int32_t minval;
+ int32_t maxval;
+
+ ARequantizeLayer32() = default;
+
+ ARequantizeLayer32(int32_t *b, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
+ bias(b), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
+ {
+ }
+};
+
+struct Nothing
+{
+};
+
template<typename Top, typename Tret>
using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
/* Low level API calls.
* These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
-/* method_is_compatible(): Can a GEMM of the templated types with the
- * provided parameters be provided using the supplied method? */
-
-template<typename Top, typename Tret>
-bool method_is_compatible(GemmMethod method, const GemmArgs<Tret> &args);
-
-template<typename Top, typename Tret>
-bool method_is_compatible(GemmMethod method, const CPUInfo &ci,
- const unsigned int M, const unsigned int N, const unsigned int K,
- const unsigned int nbatches, const unsigned int nmulti,
- const bool trA, const bool trB, const Tret alpha, const Tret beta,
- const int maxthreads, const bool pretransposed_hint)
-{
- GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
-
- return method_is_compatible<Top, Tret>(method, args);
-}
-
/* get_gemm_method(): Given the templated types and provided parameters,
* which is the preferred method to implement this GEMM? */
-template<typename Top, typename Tret>
-KernelDescription get_gemm_method(const GemmArgs<Tret> &args);
+template<typename Top, typename Tret, class OutputStage = Nothing>
+KernelDescription get_gemm_method(const GemmArgs<Tret> &args, const OutputStage & ={});
-template<typename Top, typename Tret>
-KernelDescription get_gemm_method(const CPUInfo &ci,
- const unsigned int M, const unsigned int N, const unsigned int K,
- const unsigned int nbatches, const unsigned int nmulti,
- const bool trA, const bool trB, const Tret alpha, const Tret beta,
- const int maxthreads, const bool pretransposed_hint)
-{
- GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
-
- return get_gemm_method<Top, Tret>(args);
-}
-
-template<typename Top, typename Tret>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args);
-
-/** Request an object to process a GEMM.
- *
- * @param[in] ci Describes CPU properties.
- * @param[in] M Rows in output matrix C (and input matrix A).
- * @param[in] N Columns in output matrix C (and input matrix B).
- * @param[in] K Columns of input matrix A (= rows of input matrix B).
- * @param[in] nbatches Number of "batched" GEMMs (unique A and C, shared B).
- * @param[in] nmulti Number of "multi" GEMMs (unique A, B and C).
- * @param[in] trA Does A tensor has rows and columns transposed?
- * @param[in] trB Does B tensor has rows and columns transposed?
- * @param[in] alpha Scalar multiplier to apply to AB matrix product.
- * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
- * @param[in] maxthreads Maximum (and default) number of threads that will call execute method.
- * @param[in] pretransposed_hint Can the B tensor can be pretransposed (ie shared across invocations)?
- * @param[in] cfg (optional) configuration parameters
- */
-template<typename Top, typename Tret>
-UniqueGemmCommon<Top, Tret> gemm(const CPUInfo &ci,
- const unsigned int M, const unsigned int N, const unsigned int K,
- const unsigned int nbatches, const unsigned int nmulti,
- const bool trA, const bool trB, const Tret alpha, const Tret beta,
- const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
-{
- GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
-
- return gemm<Top, Tret>(args);
-}
-
-template<typename Top, typename Tret>
-std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args);
-
-template<typename Top, typename Tret>
-std::vector<KernelDescription> get_compatible_kernels(const CPUInfo &ci,
- const unsigned int M, const unsigned int N, const unsigned int K,
- const unsigned int nbatches, const unsigned int nmulti,
- const bool trA, const bool trB, const Tret alpha, const Tret beta,
- const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
-{
- GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
+template<typename Top, typename Tret, class OutputStage = Nothing>
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args, const OutputStage & ={});
- return get_compatible_kernels<Top, Tret>(args);
-}
+template<typename Top, typename Tret, class OutputStage = Nothing>
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args, const OutputStage & ={});
} // namespace arm_gemm
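
With these declarations, requesting a quantized GEMM is a matter of passing an ARequantizeLayer32 as the OutputStage argument; the Nothing default keeps the existing paths unchanged. A hedged usage sketch follows: the uint8_t instantiation, the CPUInfo/problem-size setup, and the variable names are assumptions for illustration.

// Sketch only: instantiation types and setup are assumptions.
arm_gemm::GemmArgs<uint8_t> args(&ci, M, N, K, /*nbatches=*/1, /*nmulti=*/1,
                                 /*trA=*/false, /*trB=*/false,
                                 /*alpha=*/1, /*beta=*/0,
                                 /*maxthreads=*/1, /*pretransposed_hint=*/false);

// Note the constructor's parameter order: shift comes before multiplier.
arm_gemm::ARequantizeLayer32 qp(bias_ptr, a_offset, b_offset, c_offset,
                                requant_shift, requant_mul,
                                /*minval=*/0, /*maxval=*/255);

// OutputStage is deduced as ARequantizeLayer32; omitting the second
// argument defaults it to Nothing and selects the non-quantized paths.
auto gemm_u8 = arm_gemm::gemm<uint8_t, uint8_t>(args, qp);
arm_gemm::KernelDescription kd = arm_gemm::get_gemm_method<uint8_t, uint8_t>(args, qp);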
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index bb32fea9da..f59a61703f 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -46,7 +46,7 @@ public:
*/
virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
- void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+ void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
/* For threading, we divide the work into some number of units and work
* out internally what unit corresponds to what work. This returns the
@@ -93,6 +93,10 @@ public:
/* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
virtual void set_pretransposed_B_data(void *) { }
+ /*** "Quantized bias" interface (optional) ***/
+ /* Set the bias vector for quantized GEMMs */
+ virtual void set_quantized_bias(const int32_t *bias) { }
+
// Destructor
virtual ~IGemmCommon() { }
};
@@ -125,7 +129,7 @@ public:
* strides (templated version with appropriate types). */
virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
- Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+ Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
_Aptr = A;
_lda = lda;
_A_batch_stride = A_batch_stride;
@@ -142,7 +146,7 @@ public:
/* Implementation of the void * overload which casts its arguments to the appropriate type. */
void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
- void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+ void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
static_cast<const To *>(B), ldb, B_multi_stride,
static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
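
As a closing note, the new set_quantized_bias() hook slots into the existing call sequence alongside set_arrays(); a hedged sketch, continuing the hypothetical gemm_u8 object from the sketch above:

// Sketch: pointer and stride names are placeholders.
gemm_u8->set_arrays(a_ptr, lda, a_batch_stride, a_multi_stride,
                    b_ptr, ldb, b_multi_stride,
                    c_ptr, ldc, c_batch_stride, c_multi_stride);
gemm_u8->set_quantized_bias(bias_ptr); // base-class default is a no-op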