-rw-r--r--arm_compute/core/NEON/kernels/assembly/Helpers.h22
-rw-r--r--arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h1
-rw-r--r--src/core/NEON/kernels/assembly/Helpers.cpp114
-rw-r--r--src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h24
-rw-r--r--src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp79
5 files changed, 198 insertions, 42 deletions
diff --git a/arm_compute/core/NEON/kernels/assembly/Helpers.h b/arm_compute/core/NEON/kernels/assembly/Helpers.h
index 0dcba88a95..11c4c08086 100644
--- a/arm_compute/core/NEON/kernels/assembly/Helpers.h
+++ b/arm_compute/core/NEON/kernels/assembly/Helpers.h
@@ -38,6 +38,28 @@ struct BlockSizes
unsigned int strategy_out_height{ 0 }; /**< Number of rows (M) processed by the selected strategy */
};
+/** Calculate the recommended block sizes to use based on the CPU cache sizes and data type
+ *
+ * @param[in] ci CPU information
+ * @param[in] M M dimension.
+ * @param[in] N N dimension.
+ * @param[in] K K dimension.
+ * @param[in] input_type Input data type
+ * @param[in]  use_dot    (Optional) Whether to use the dot product instruction when input_type is QASYMM8/U8/S8.
+ *
+ * @return Recommended block sizes to use for the given M, N, K dimensions.
+ */
+BlockSizes calculate_block_sizes_from_data_type(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K, DataType input_type, bool use_dot = false);
+
+/** Get the name of the GEMM strategy which will be used for a given input type
+ *
+ * @param[in] input_type Input data type
+ * @param[in] use_dot    (Optional) Whether to use the dot product instruction when input_type is QASYMM8/U8/S8.
+ *
+ * @return The name of the strategy that will be used
+ */
+const char *get_strategy_name(DataType input_type, bool use_dot = false);
+
/** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
*
* @param[in] ci CPU information
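A minimal usage sketch of the two new helpers (the dimensions are hypothetical; the CPUInfo comes from the NEON scheduler, as NEGEMMInterleavedWrapper does further down):

    // Sketch: query block sizes and the strategy name for a QASYMM8 GEMM using the dot product path.
    const CPUInfo   &ci   = NEScheduler::get().cpu_info();
    const BlockSizes bs   = calculate_block_sizes_from_data_type(ci, 128U, 256U, 64U, DataType::QASYMM8, /* use_dot = */ true);
    const char      *name = get_strategy_name(DataType::QASYMM8, /* use_dot = */ true); // "gemm_u8_12x8_dot" on aarch64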
diff --git a/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h b/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
index cead71ed67..4c5a8d4e59 100644
--- a/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
+++ b/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
@@ -93,6 +93,7 @@ private:
std::vector<PrepareBWorkload> _b_workloads{};
std::vector<MatrixMultiplyWorkload> _mm_workloads{};
std::vector<IScheduler::Workload> _workloads{};
+ std::string _tag{};
};
} // namespace arm_compute
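The new _tag member caches the scheduler tag that configure() builds from the strategy name; per the wrapper changes below, the resulting strings look like:

    // "NEGEMMInterleaved_" + strategy name, plus "_preB" when B is pretransposed upfront:
    //   NEGEMMInterleaved_sgemm_12x8               (F32, B transformed on every run)
    //   NEGEMMInterleaved_gemm_u8_12x8_dot_preB    (QASYMM8 with dot product, pretransposed B)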
diff --git a/src/core/NEON/kernels/assembly/Helpers.cpp b/src/core/NEON/kernels/assembly/Helpers.cpp
new file mode 100644
index 0000000000..09ac08c0a4
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/Helpers.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+
+#include "NEGEMMInterleavedStrategies.h"
+
+namespace arm_compute
+{
+namespace
+{
+template <typename InputType, bool use_dot = false>
+BlockSizes calculate_block_sizes_template(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K)
+{
+ using strategy = typename Kernel<InputType, use_dot>::strategy;
+ return calculate_block_sizes<strategy>(ci, M, N, K);
+}
+} // namespace
+
+const char *get_strategy_name(DataType input_type, bool use_dot)
+{
+ switch(input_type)
+ {
+ case DataType::F32:
+ return Kernel<float>::name;
+#ifdef __aarch64__
+ case DataType::U8:
+ case DataType::QASYMM8:
+ if(use_dot)
+ {
+ return Kernel<uint8_t, true>::name;
+ }
+ else
+ {
+ return Kernel<uint8_t, false>::name;
+ }
+ case DataType::S8:
+ if(use_dot)
+ {
+ return Kernel<int8_t, true>::name;
+ }
+ else
+ {
+ return Kernel<int8_t, false>::name;
+ }
+#endif /* __aarch64__ */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ return Kernel<__fp16>::name;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ ARM_COMPUTE_ERROR("DataType not supported");
+ break;
+ }
+}
+
+BlockSizes calculate_block_sizes_from_data_type(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K, DataType input_type, bool use_dot)
+{
+ switch(input_type)
+ {
+ case DataType::F32:
+ return calculate_block_sizes_template<float>(ci, M, N, K);
+#ifdef __aarch64__
+ case DataType::U8:
+ case DataType::QASYMM8:
+ if(use_dot)
+ {
+ return calculate_block_sizes_template<uint8_t, true>(ci, M, N, K);
+ }
+ else
+ {
+ return calculate_block_sizes_template<uint8_t, false>(ci, M, N, K);
+ }
+ case DataType::S8:
+ if(use_dot)
+ {
+ return calculate_block_sizes_template<int8_t, true>(ci, M, N, K);
+ }
+ else
+ {
+ return calculate_block_sizes_template<int8_t, false>(ci, M, N, K);
+ }
+#endif /* __aarch64__ */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ return calculate_block_sizes_template<__fp16>(ci, M, N, K);
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ ARM_COMPUTE_ERROR("DataType not supported");
+ break;
+ }
+}
+} // namespace arm_compute
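get_strategy_name() and calculate_block_sizes_from_data_type() repeat the same DataType-to-Kernel switch; a sketch of how it could be factored into a single generic dispatcher (a hypothetical helper, assuming C++14 generic lambdas — not part of this patch):

    template <typename Functor>
    auto dispatch_kernel(DataType input_type, bool use_dot, Functor &&f)
    {
        switch(input_type)
        {
            case DataType::F32:
                return f(Kernel<float, false>{});
    #ifdef __aarch64__
            case DataType::U8:
            case DataType::QASYMM8:
                return use_dot ? f(Kernel<uint8_t, true>{}) : f(Kernel<uint8_t, false>{});
            case DataType::S8:
                return use_dot ? f(Kernel<int8_t, true>{}) : f(Kernel<int8_t, false>{});
    #endif /* __aarch64__ */
    #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            case DataType::F16:
                return f(Kernel<__fp16, false>{});
    #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
            default:
                ARM_COMPUTE_ERROR("DataType not supported");
        }
    }

    // Usage: both public functions reduce to one-liners.
    const char *name = dispatch_kernel(input_type, use_dot, [](auto k) { return decltype(k)::name; });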
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
index 26a8ade461..00e483c0f8 100644
--- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
@@ -47,49 +47,59 @@ struct Kernel
{
};
+#define DEFINE_STRATEGY_SUFFIX(strat, suffix) \
+ using strategy = arm_gemm::strat; \
+ static constexpr const char *name = #strat suffix;
+
+#define DEFINE_STRATEGY(strat) \
+ DEFINE_STRATEGY_SUFFIX(strat, "")
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
struct Kernel<float16_t, false>
{
- using strategy = arm_gemm::hgemm_24x8;
+ DEFINE_STRATEGY(hgemm_24x8)
};
#endif /*__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
#ifdef __aarch64__
template <>
struct Kernel<float, false>
{
- using strategy = arm_gemm::sgemm_12x8;
+ DEFINE_STRATEGY(sgemm_12x8)
};
template <>
struct Kernel<int8_t, false>
{
- using strategy = arm_gemm::gemm_s8_4x4;
+ DEFINE_STRATEGY(gemm_s8_4x4)
};
template <>
struct Kernel<uint8_t, false>
{
- using strategy = arm_gemm::gemm_u8_4x4;
+ DEFINE_STRATEGY(gemm_u8_4x4)
};
// Use different strategies for the 8-bit dot product:
template <>
struct Kernel<int8_t, true>
{
- using strategy = arm_gemm::gemm_s8_12x8;
+ DEFINE_STRATEGY_SUFFIX(gemm_s8_12x8, "_dot")
};
template <>
struct Kernel<uint8_t, true>
{
- using strategy = arm_gemm::gemm_u8_12x8;
+ DEFINE_STRATEGY_SUFFIX(gemm_u8_12x8, "_dot")
};
#else
template <>
struct Kernel<float, false>
{
- using strategy = arm_gemm::sgemm_8x6;
+ DEFINE_STRATEGY(sgemm_8x6)
};
#endif /* __aarch64__ */
+#undef DEFINE_STRATEGY
+#undef DEFINE_STRATEGY_SUFFIX
+
} // namespace
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ */
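For reference, the macro stringizes the strategy and concatenates the literal suffix, so DEFINE_STRATEGY_SUFFIX(gemm_u8_12x8, "_dot") expands to:

    using strategy = arm_gemm::gemm_u8_12x8;
    static constexpr const char *name = "gemm_u8_12x8" "_dot"; // adjacent literals merge to "gemm_u8_12x8_dot"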
diff --git a/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp b/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp
index 69d59283ae..c87e82afb8 100644
--- a/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp
+++ b/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"
#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h"
#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h"
@@ -42,7 +43,7 @@ void NEGEMMInterleavedWrapper::run()
prepare();
_memory_group.acquire();
- NEScheduler::get().run_tagged_workloads(_workloads, "NEGEMMInterleavedWrapper");
+ NEScheduler::get().run_tagged_workloads(_workloads, _tag.c_str());
_memory_group.release();
}
@@ -151,51 +152,59 @@ void NEGEMMInterleavedWrapper::configure(const ITensor *a, const ITensor *b, ITe
const unsigned int alignment = 128;
_transformed_b.allocator()->init(TensorInfo{}, alignment);
_tmp_c.allocator()->init(TensorInfo{}, alignment);
+ _tag = "NEGEMMInterleaved_";
+ _tag += get_strategy_name(input_type, use_dot);
+
if(!_pretranspose_b)
{
// If B is transposed at every iteration then transformed_B can be managed:
_memory_group.manage(&_transformed_b);
+ _block_sizes = calculate_block_sizes_from_data_type(NEScheduler::get().cpu_info(), _params.M, _params.N, _params.K, input_type, use_dot);
}
- switch(input_type)
+ else
{
- case DataType::F32:
- _prepare_b = instantiate_prepareB<float>(_b, &_transformed_b, _params);
- break;
+ _tag += "_preB";
+ switch(input_type)
+ {
+ case DataType::F32:
+ _prepare_b = instantiate_prepareB<float>(_b, &_transformed_b, _params);
+ break;
#ifdef __aarch64__
- case DataType::U8:
- case DataType::QASYMM8:
- if(use_dot)
- {
- _prepare_b = instantiate_prepareB<uint8_t, true>(_b, &_transformed_b, _params);
- }
- else
- {
- _prepare_b = instantiate_prepareB<uint8_t, false>(_b, &_transformed_b, _params);
- }
- break;
- case DataType::S8:
- if(use_dot)
- {
- _prepare_b = instantiate_prepareB<int8_t, true>(_b, &_transformed_b, _params);
- }
- else
- {
- _prepare_b = instantiate_prepareB<int8_t, false>(_b, &_transformed_b, _params);
- }
- break;
+ case DataType::U8:
+ case DataType::QASYMM8:
+ if(use_dot)
+ {
+ _prepare_b = instantiate_prepareB<uint8_t, true>(_b, &_transformed_b, _params);
+ }
+ else
+ {
+ _prepare_b = instantiate_prepareB<uint8_t, false>(_b, &_transformed_b, _params);
+ }
+ break;
+ case DataType::S8:
+ if(use_dot)
+ {
+ _prepare_b = instantiate_prepareB<int8_t, true>(_b, &_transformed_b, _params);
+ }
+ else
+ {
+ _prepare_b = instantiate_prepareB<int8_t, false>(_b, &_transformed_b, _params);
+ }
+ break;
#endif /* __aarch64__ */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F16:
- _prepare_b = instantiate_prepareB<__fp16>(_b, &_transformed_b, _params);
- break;
+ case DataType::F16:
+ _prepare_b = instantiate_prepareB<__fp16>(_b, &_transformed_b, _params);
+ break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- default:
- ARM_COMPUTE_ERROR("DataType not supported");
- break;
- }
- ARM_COMPUTE_ERROR_ON(_prepare_b == nullptr);
+ default:
+ ARM_COMPUTE_ERROR("DataType not supported");
+ break;
+ }
+ ARM_COMPUTE_ERROR_ON(_prepare_b == nullptr);
- _block_sizes = _prepare_b->block_sizes();
+ _block_sizes = _prepare_b->block_sizes();
+ }
_block_walker.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_params.N, _block_sizes.x_block), _block_sizes.x_block));
_block_walker.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_params.K, _block_sizes.k_block), _block_sizes.k_block));
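ceil_to_multiple() rounds each dimension up to a whole number of blocks, so the block walker always iterates over complete blocks; a quick sketch of the arithmetic with hypothetical sizes:

    // e.g. N = 100, x_block = 48: ceil_to_multiple(100, 48) = 144, so the walker
    // steps over three 48-wide blocks, the last one only partially filled.
    const unsigned int padded_n = ceil_to_multiple(_params.N, _block_sizes.x_block);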