diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2018-07-17 12:28:42 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:54:54 +0000 |
commit | 7d66a8e3f603f2cd363f04a750847e3f9eabdfd4 (patch) | |
tree | 0d7e1ad5bf0ecd32cd919074f756d27c351d7638 /src/runtime/NEON/functions/NERNNLayer.cpp | |
parent | ae54e026c86aec7d6819ee3ef76372c1a3c92467 (diff) | |
download | ComputeLibrary-7d66a8e3f603f2cd363f04a750847e3f9eabdfd4.tar.gz |
COMPMID-1386: Add support for converting weights for CL.
Change-Id: I62e3ead903366baeeb1488f233a9b8b0c388c9de
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140403
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/runtime/NEON/functions/NERNNLayer.cpp')
-rw-r--r-- | src/runtime/NEON/functions/NERNNLayer.cpp | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index 08017e20c3..f77566a108 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -57,7 +57,7 @@ Status NERNNLayer::validate(const ITensorInfo *input, const ITensorInfo *weights
     auto shape_info = TensorInfo(misc::shape_calculator::compute_rnn_shape(recurrent_weights, hidden_state->dimension(idx_height)), 1, input->data_type());
 
-    ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, weights, bias, &shape_info, true, false));
+    ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, weights, bias, &shape_info));
     ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAdditionKernel::validate(&shape_info, &shape_info, &shape_info, ConvertPolicy::SATURATE));
     ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&shape_info, &shape_info, info));
@@ -79,7 +79,7 @@ void NERNNLayer::configure(const ITensor *input, const ITensor *weights, const I
     // Manage intermediate buffers and configure
     _fully_connected_out.allocator()->init(TensorInfo(shape, 1, input->info()->data_type()));
     _memory_group.manage(&_fully_connected_out);
-    _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out, true, false);
+    _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out);
 
     _gemm_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type()));
     _memory_group.manage(&_gemm_output);