-rw-r--r--  arm_compute/runtime/NEON/AssemblyHelper.h                                 | 48
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMM.h                               |  1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h               |  1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h |  1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h         |  1
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp                                     |  5
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp                     | 10
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp       |  6
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp               |  8
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp                 |  2
10 files changed, 57 insertions(+), 26 deletions(-)
diff --git a/arm_compute/runtime/NEON/AssemblyHelper.h b/arm_compute/runtime/NEON/AssemblyHelper.h
index 2b4f35f2e1..ee09ef531e 100644
--- a/arm_compute/runtime/NEON/AssemblyHelper.h
+++ b/arm_compute/runtime/NEON/AssemblyHelper.h
@@ -51,7 +51,7 @@ public:
using TypeResult = TypeOutput;
/** Default constructor. */
AssemblyKernelGlue()
- : _gemm_kernel_asm(nullptr), _optimised_kernel(nullptr), _a(nullptr), _b(nullptr), _d(nullptr)
+ : _gemm_kernel_asm(nullptr), _optimised_kernel(nullptr), _a(nullptr), _b(nullptr), _d(nullptr), _pretranspose(nullptr)
{
}
/** Assembly Gemm */
@@ -72,6 +72,8 @@ public:
const ITensor *_b;
/** Output */
ITensor *_d;
+ /** Pre-transpose tensor */
+ ITensor *_pretranspose;
/** Configures the array pointers and strides in the assembly kernel, then executes the assembly kernel.
* The call to set_arrays is needed to handle input sizes that contain batches (dims > 2)
@@ -94,6 +96,12 @@ public:
auto out_ptr = reinterpret_cast<TypeOutput *>(_d->buffer());
_gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a, in1_ptr, ldb, multi_stride_b, out_ptr, ldd, batch_stride_d, multi_stride_d);
+ if(_gemm_kernel_asm->B_pretranspose_required())
+ {
+ ARM_COMPUTE_ERROR_ON(_pretranspose == nullptr || _pretranspose->buffer() == nullptr);
+ _gemm_kernel_asm->pretranspose_B_array(reinterpret_cast<void *>(_pretranspose->buffer()), in1_ptr, ldb, multi_stride_b);
+ }
+
NEScheduler::get().schedule(_optimised_kernel.get(), Window::DimX);
}
};
@@ -113,8 +121,9 @@ using AssemblyKernelGlueS8S32 = AssemblyKernelGlue<int8_t, int32_t>;
* @param[in] alignment Workspace memory alignment.
* @param[in] num_threads Number of workspace threads.
*/
-inline void allocate_workspace(size_t workspace_size, Tensor &workspace, MemoryGroup &memory_group, size_t alignment, unsigned int num_threads)
+inline void allocate_workspace(size_t workspace_size, Tensor &workspace, MemoryGroup *memory_group, size_t alignment, unsigned int num_threads)
{
+ ARM_COMPUTE_UNUSED(memory_group);
ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment - 1) * num_threads }, 1, DataType::S8));
workspace.allocator()->allocate();
@@ -122,20 +131,22 @@ inline void allocate_workspace(size_t workspace_size, Tensor &workspace, MemoryG
/** Create a wrapper kernel.
*
- * @param[in] a Input tensor A.
- * @param[in] b Input tensor B.
- * @param[out] d Output tensor.
- * @param[in] alpha Alpha value.
- * @param[in] beta Beta value.
- * @param[out] workspace Workspace tensor
- * @param[in] memory_group Tensor memory group.
- * @param[out] asm_glue Assembly glue kernel.
+ * @param[in] a Input tensor A.
+ * @param[in] b Input tensor B.
+ * @param[out] d Output tensor.
+ * @param[in] alpha Alpha value.
+ * @param[in] beta Beta value.
+ * @param[in] pretranspose_hint Hint that matrix B should be pre-transposed
+ * @param[out] workspace Workspace tensor
+ * @param[out] B_pretranspose Tensor to hold the pre-transposed B
+ * @param[in] memory_group Tensor memory group.
+ * @param[out] asm_glue Assembly glue kernel.
*
* @return True if the assembly kernel was set up successfully, false otherwise.
*/
template <typename T>
-inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta,
- Tensor &workspace, MemoryGroup &memory_group, T &asm_glue)
+inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
+ Tensor &workspace, Tensor &B_pretranspose, MemoryGroup &memory_group, T &asm_glue)
{
const CPUInfo &ci = NEScheduler::get().cpu_info();
const int M = d->info()->tensor_shape().y();
@@ -147,7 +158,7 @@ inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, ITensor *d
// unique_ptr to a Gemm object
std::unique_ptr<typename T::AssemblyGemm>
- asm_gemm(arm_gemm::gemm<typename T::TypeOperator, typename T::TypeResult>(ci, M, N, K, batches, multis, false, false, alpha, beta, num_threads, false));
+ asm_gemm(arm_gemm::gemm<typename T::TypeOperator, typename T::TypeResult>(ci, M, N, K, batches, multis, false, false, alpha, beta, num_threads, pretranspose_hint));
// arm_compute wrapper for the Gemm object (see above)
std::unique_ptr<NEGEMMAssemblyWrapper<typename T::AssemblyGemm>>
acl_gemm_wrapper = support::cpp14::make_unique<NEGEMMAssemblyWrapper<typename T::AssemblyGemm>>();
@@ -159,7 +170,7 @@ inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, ITensor *d
{
// Allocate workspace
const unsigned int alignment = 4096;
- allocate_workspace(workspace_size, workspace, memory_group, alignment, num_threads);
+ allocate_workspace(workspace_size, workspace, &memory_group, alignment, num_threads);
ARM_COMPUTE_ERROR_ON_NULLPTR(workspace.buffer());
asm_gemm->set_working_space(reinterpret_cast<typename T::TypeResult *>(workspace.buffer()));
}
@@ -175,6 +186,15 @@ inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, ITensor *d
}
}
+ // If the kernel requires B to be pre-transposed, allocate a buffer for it
+ if(asm_gemm->B_pretranspose_required())
+ {
+ const size_t B_pretranspose_size = asm_gemm->get_B_pretransposed_array_size();
+ allocate_workspace(B_pretranspose_size, B_pretranspose, nullptr, 1, 1);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(B_pretranspose.buffer());
+ asm_glue._pretranspose = &B_pretranspose;
+ }
+
asm_glue._gemm_kernel_asm = std::move(asm_gemm);
asm_glue._optimised_kernel = std::move(acl_gemm_wrapper);
// We need to setup the ptrs in the run() method
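
Taken together, the AssemblyHelper changes are driven as follows. A minimal sketch against the new signatures (the tensors and the ok flag are illustrative, not part of the patch; a and b are the configured input ITensors, d the output):

    // Sketch only: assumes the setup_assembly_kernel() overload introduced above.
    AssemblyKernelGlueF32 asm_glue;        // AssemblyKernelGlue<float, float>
    Tensor                workspace;       // per-run scratch
    Tensor                B_pretransposed; // persistent copy of pre-transposed B
    MemoryGroup           memory_group;

    // The hint is forwarded to arm_gemm::gemm(). On success this also sizes and
    // allocates B_pretransposed (via allocate_workspace() with a nullptr memory
    // group) and stores a pointer to it in asm_glue._pretranspose.
    const bool ok = setup_assembly_kernel(a, b, d, 1.f, 0.f, /* pretranspose_hint */ true,
                                          workspace, B_pretransposed, memory_group, asm_glue);
    if(ok)
    {
        // run() now calls pretranspose_B_array() before scheduling the kernel
        // whenever B_pretranspose_required() reports true.
        asm_glue.run();
    }
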
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index 5279995be4..e2263c2307 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -83,6 +83,7 @@ private:
Tensor _tmp_a;
Tensor _tmp_b;
Tensor _workspace;
+ Tensor _B_pretransposed;
bool _run_vector_matrix_multiplication;
bool _run_addition;
bool _is_first_run;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index 24e23f133a..752693188c 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -185,6 +185,7 @@ private:
Tensor _gemm_output;
Tensor _tmp_output;
Tensor _workspace;
+ Tensor _B_pretransposed;
bool _append_bias;
bool _is_fully_connected_convolution;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index f09c94e726..11ca1bc313 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -67,6 +67,7 @@ private:
Tensor _tmp_a;
Tensor _tmp_b;
Tensor _workspace;
+ Tensor _B_pretransposed;
};
}
#endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 3c9fb0ea5f..adcddb8263 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -102,6 +102,7 @@ private:
Tensor _tmp_a;
Tensor _tmp_b;
Tensor _workspace;
+ Tensor _B_pretranspose;
int32_t _a_offset;
int32_t _b_offset;
bool _run_vector_matrix_multiplication;
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index e0859be93e..9168ed4327 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -39,7 +39,7 @@
namespace arm_compute
{
NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _asm_glue(), _ma_kernel(), _tmp_a(), _tmp_b(), _workspace(),
+ : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _asm_glue(), _ma_kernel(), _tmp_a(), _tmp_b(), _workspace(), _B_pretransposed(),
_run_vector_matrix_multiplication(false), _run_addition(false), _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}
@@ -66,7 +66,8 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
_reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
_run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
- const bool run_optimised = a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f) && setup_assembly_kernel(a, b, d, alpha, beta, _workspace, _memory_group, _asm_glue);
+ const bool run_optimised = a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f)
+ && setup_assembly_kernel(a, b, d, alpha, beta, _reshape_b_only_on_first_run, _workspace, _B_pretransposed, _memory_group, _asm_glue);
// Check if the first input tensor is a vector.
// If so, all the kernels for reshaping the tensors can be skipped
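
NEGEMM derives the hint from the caller-supplied GEMMInfo, so B is only pre-transposed when it is reused across runs. A hedged usage sketch (tensor setup elided; the GEMMInfo constructor argument order is assumed from the API of this period):

    NEGEMM gemm;
    // reshape_b_only_on_first_run == true signals that B is constant across runs,
    // which configure() above now forwards as the pre-transpose hint.
    gemm.configure(&a, &b, nullptr, &d, 1.f, 0.f,
                   GEMMInfo(false /* is_a_reshaped */, false /* is_b_reshaped */,
                            true /* reshape_b_only_on_first_run */));
    gemm.run();
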
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index 3c48d691ed..1ffeaf227d 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -212,7 +212,8 @@ Status validate_and_initialize_values(const ITensorInfo *input, const ITensorInf
NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
: _asm_glue(), _memory_group(memory_manager), _input_im2col_kernel(), _input_interleave_kernel(), _reshape_weights(), _mm_kernel(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
_output_col2im_kernel(), _activationlayer_function(), _original_weights(nullptr), _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _gemm_output(), _tmp_output(),
- _workspace(), _append_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false), _is_quantized(false), _is_interleaved(false), _is_activationlayer_enabled(false)
+ _workspace(), _B_pretransposed(), _append_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false), _is_quantized(false), _is_interleaved(false),
+ _is_activationlayer_enabled(false)
{
}
@@ -365,7 +366,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
// Configure matrix multiply
if(run_optimised)
{
- if(!setup_assembly_kernel(&_input_im2col_reshaped, weights, &_gemm_output, 1.f, 0.f, _workspace, _memory_group, _asm_glue))
+ if(!setup_assembly_kernel(&_input_im2col_reshaped, weights, &_gemm_output, 1.f, 0.f, true, _workspace, _B_pretransposed, _memory_group, _asm_glue))
{
ARM_COMPUTE_ERROR("setup_assembly_kernel failed.");
}
@@ -559,6 +560,11 @@ void NEGEMMConvolutionLayer::run()
if(_asm_glue._optimised_kernel != nullptr)
{
_asm_glue.run();
+ // Release the reshaped weights: once pre-transposed, the kernel no longer reads them
+ if(_B_pretransposed.buffer() != nullptr && _weights_reshaped.is_used())
+ {
+ _weights_reshaped.allocator()->free();
+ }
}
else
{
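
Note on the hunk above: after pretranspose_B_array() has run, the assembly kernel reads B exclusively from _B_pretransposed, so _weights_reshaped appears to serve only as a staging copy; freeing it after the first run keeps a single resident copy of the weights instead of two.
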
diff --git a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
index 27dd6c51d7..bd81bf202f 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
@@ -39,7 +39,7 @@ using namespace arm_compute;
NEGEMMLowpAssemblyMatrixMultiplyCore::NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _asm_glue_unsigned(), _asm_glue_signed(), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b(),
- _workspace()
+ _workspace(), _B_pretransposed()
{
}
@@ -58,13 +58,13 @@ void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITe
{
case DataType::S8:
{
- run_optimised = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_signed);
+ run_optimised = setup_assembly_kernel(a, b, output, 1.f, 1.f, true, _workspace, _B_pretransposed, _memory_group, _asm_glue_signed);
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- run_optimised = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_unsigned);
+ run_optimised = setup_assembly_kernel(a, b, output, 1.f, 1.f, true, _workspace, _B_pretransposed, _memory_group, _asm_glue_unsigned);
break;
}
default:
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index cbec73fc31..30dd289326 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -42,8 +42,8 @@ using namespace arm_compute::misc::shape_calculator;
NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _asm_glue_unsigned(), _asm_glue_signed(), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(),
- _mtx_b_reduction_kernel(), _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _workspace(), _a_offset(0), _b_offset(0), _run_vector_matrix_multiplication(false),
- _dot_product_path(false), _is_first_run(true), _reshape_b_only_on_first_run(false)
+ _mtx_b_reduction_kernel(), _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _workspace(), _B_pretranspose(), _a_offset(0), _b_offset(0),
+ _run_vector_matrix_multiplication(false), _dot_product_path(false), _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}
@@ -62,13 +62,13 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
{
case DataType::S8:
{
- _dot_product_path = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_signed);
+ _dot_product_path = setup_assembly_kernel(a, b, output, 1.f, 1.f, true, _workspace, _B_pretranspose, _memory_group, _asm_glue_signed);
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- _dot_product_path = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_unsigned);
+ _dot_product_path = setup_assembly_kernel(a, b, output, 1.f, 1.f, true, _workspace, _B_pretranspose, _memory_group, _asm_glue_unsigned);
break;
}
default:
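
Unlike NEGEMM, both low-precision cores pass a hard-coded true hint rather than consulting GEMMInfo; presumably B holds constant quantized weights on these paths, so pre-transposing it unconditionally is always profitable.
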
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index a1256ac8cb..f4640fb0b6 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -227,7 +227,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
if(workspace_size > 0)
{
const unsigned int alignment = 4096;
- allocate_workspace(workspace_size, _workspace, _memory_group, alignment, 1);
+ allocate_workspace(workspace_size, _workspace, &_memory_group, alignment, 1);
_arm_gemm->set_working_space(reinterpret_cast<float *>(_workspace.buffer()));
}
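
With the signature change, allocate_workspace() takes its MemoryGroup by pointer, so callers that have no group to offer can pass nullptr. Both call styles side by side (sketch; sizes and names taken from the hunks above):

    // Scratch workspace, nominally tied to the function's memory group
    // (the pointer is currently unused, see ARM_COMPUTE_UNUSED above):
    allocate_workspace(workspace_size, _workspace, &_memory_group, 4096 /* alignment */, num_threads);

    // Buffer for the pre-transposed B: passes nullptr, as this tensor must
    // stay alive across runs rather than being recycled per run.
    allocate_workspace(B_pretranspose_size, _B_pretransposed, nullptr, 1 /* alignment */, 1 /* num_threads */);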