diff options
author | Chunosov <N.Chunosov@yandex.ru> | 2017-11-08 16:09:35 +0700 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000 |
commit | f450caa7d2ac9a2a90407fb81203228dc82ef4a1 (patch) | |
tree | ed34d43943cd36cbd6776ddc6ac87e92d6f7dcc0 /src/runtime/CL | |
parent | 7068f9900d136312318ff430aef588b14e0c87ad (diff) | |
download | ComputeLibrary-f450caa7d2ac9a2a90407fb81203228dc82ef4a1.tar.gz |
COMPMID-661: softmax-uint8 implementation (#16)
Change-Id: Iad11ce70a8a0878a48e445a092035c49c926cece
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94855
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/runtime/CL')
-rw-r--r-- | src/runtime/CL/functions/CLSoftmaxLayer.cpp | 20 |
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index a059f9e5fd..ff018d595c 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -41,16 +41,20 @@ CLSoftmaxLayer::CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
 
 void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float beta)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
 
     // Create intermediate tensors shapes
-    _tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
+    DataType tmp_data_type = is_data_type_quantized_asymmetric(input->info()->data_type()) ? DataType::S32 : input->info()->data_type();
+    TensorInfo tensor_info_tmp(input->info()->tensor_shape(), input->info()->num_channels(), tmp_data_type, input->info()->fixed_point_position());
+    tensor_info_tmp.set_quantization_info(input->info()->quantization_info());
+    _tmp.allocator()->init(tensor_info_tmp);
 
-    TensorShape shape = input->info()->tensor_shape();
-    shape.set(0, 1);
-    TensorInfo tensor_info_max_sum(shape, input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
-    _max.allocator()->init(tensor_info_max_sum);
-    _sum.allocator()->init(tensor_info_max_sum);
+    TensorShape max_sum_shape = input->info()->tensor_shape();
+    max_sum_shape.set(0, 1);
+    TensorInfo tensor_info_max(max_sum_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
+    tensor_info_max.set_quantization_info(input->info()->quantization_info());
+    _max.allocator()->init(tensor_info_max);
+    _sum.allocator()->init(TensorInfo(max_sum_shape, input->info()->num_channels(), tmp_data_type, input->info()->fixed_point_position()));
 
     // Set GPU target to kernels
     _max_shift_exp_sum_kernel.set_target(CLScheduler::get().target());
@@ -72,7 +76,7 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float
     {
         _max_shift_exp_sum_kernel.configure(input, &_max, &_tmp, &_sum, beta);
     }
-    _norm_kernel.configure(&_tmp, &_sum, output);
+    _norm_kernel.configure(&_tmp, &_sum, output, beta);
 
     // Allocate intermediate buffers
     _tmp.allocator()->allocate();