diff options
author    Michalis Spyrou <michalis.spyrou@arm.com>  2019-09-10 17:20:34 +0100
committer Michalis Spyrou <michalis.spyrou@arm.com>  2019-09-26 10:17:30 +0000
commit    1a569a30a2f456ff1a3e0a665201e1c3ab92df80 (patch)
tree      9d68934f461579edefbe65246f6ee435aaa18808 /src/runtime/NEON/functions/NERNNLayer.cpp
parent    f1cf394ae882e6e8fb2e0986f88d2548b82a85bb (diff)
download  ComputeLibrary-1a569a30a2f456ff1a3e0a665201e1c3ab92df80.tar.gz
COMPMID-2161 [NEON] Create IWeightManager class
Change-Id: I1a9a46da2f98e896b825099151b56d1d8271dd31
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1915
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/NEON/functions/NERNNLayer.cpp')
-rw-r--r--  src/runtime/NEON/functions/NERNNLayer.cpp | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index 9ca7ded3be..67f4064632 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -34,8 +34,8 @@ namespace arm_compute
 {
 NERNNLayer::NERNNLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), _activation_kernel(), _fully_connected_kernel(), _copy_kernel(), _fully_connected_out(), _gemm_output(), _add_output(),
-      _is_prepared(false)
+    : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), _activation_kernel(), _fully_connected(memory_manager), _copy_kernel(), _fully_connected_out(), _gemm_output(),
+      _add_output(), _is_prepared(false)
 {
 }
@@ -81,7 +81,7 @@ void NERNNLayer::configure(const ITensor *input, const ITensor *weights, const I
     // Manage intermediate buffers and configure
     _memory_group.manage(&_fully_connected_out);
-    _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out);
+    _fully_connected.configure(input, weights, bias, &_fully_connected_out);

     _memory_group.manage(&_gemm_output);
     _gemm_state_f.configure(hidden_state, recurrent_weights, nullptr, &_gemm_output, 1.f, 0.f);
@@ -106,7 +106,7 @@ void NERNNLayer::run()
     MemoryGroupResourceScope scope_mg(_memory_group);

-    _fully_connected_kernel.run();
+    _fully_connected.run();

     _gemm_state_f.run();
@@ -121,7 +121,7 @@ void NERNNLayer::prepare()
 {
     if(!_is_prepared)
     {
-        _fully_connected_kernel.prepare();
+        _fully_connected.prepare();

         _gemm_state_f.prepare();
         _is_prepared = true;