author     Michalis Spyrou <michalis.spyrou@arm.com>   2019-08-22 11:44:04 +0100
committer  Michalis Spyrou <michalis.spyrou@arm.com>   2019-08-23 13:13:08 +0000
commit     29a01c90fc372d31188ab7157b45b32ce24fa9b3 (patch)
tree       419b7abc22c56fde8dece4c80c328a209c041d94 /src
parent     fb0fdcdaec57e6f8e1b96f924411921cc0ba6d94 (diff)
download   ComputeLibrary-29a01c90fc372d31188ab7157b45b32ce24fa9b3.tar.gz
COMPMID-2417: NEDequantizationLayer support for QASYMM8_PER_CHANNEL
Change-Id: I1ef4ce8610e11e81702b0b7f0f7c437fed49833e
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1795
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--   src/core/NEON/kernels/NEDequantizationLayerKernel.cpp   47
1 file changed, 46 insertions(+), 1 deletion(-)
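
For context, per-channel asymmetric dequantization applies a separate (scale, offset) pair to each channel: dst = (src - offset[c]) * scale[c]. The kernel added below indexes the per-channel vectors with the window's z coordinate. The following is a minimal scalar reference sketch of that mapping only; the function name and the channel-major buffer layout are illustrative assumptions and not part of this patch (the kernel itself vectorizes the inner loop with NEON, as the diff shows).

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative scalar reference (hypothetical helper, not library code):
// dequantize a QASYMM8_PER_CHANNEL buffer laid out as
// [channels][elements_per_channel], using one (scale, offset) pair per channel.
void dequantize_per_channel_ref(const uint8_t *src, float *dst,
                                const std::vector<float> &scale,
                                const std::vector<int32_t> &offset,
                                size_t channels, size_t elements_per_channel)
{
    for(size_t c = 0; c < channels; ++c)
    {
        for(size_t i = 0; i < elements_per_channel; ++i)
        {
            const size_t idx = c * elements_per_channel + i;
            // Same formula the kernel applies lane-wise: (value - offset) * scale
            dst[idx] = (static_cast<float>(src[idx]) - static_cast<float>(offset[c])) * scale[c];
        }
    }
}
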
diff --git a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
index e52f53ea04..d880c80d82 100644
--- a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
@@ -43,7 +43,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8, DataType::QSYMM16);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_PER_CHANNEL, DataType::QSYMM8, DataType::QSYMM16);
if(output->tensor_shape().total_size() > 0)
{
@@ -160,6 +160,48 @@ void run_dequantization_qasymm8(const ITensor *input, ITensor *output, const Win
}
template <typename T>
+void run_dequantization_qasymm8_per_channel(const ITensor *input, ITensor *output, const Window &window)
+{
+ const std::vector<float> scale = input->info()->quantization_info().scale();
+ const std::vector<int32_t> offset = input->info()->quantization_info().offset();
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Reset first dimension to handle tail calculations manually
+ Window win(window);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win);
+ Iterator out(output, win);
+
+ execute_window_loop(win, [&](const Coordinates & id)
+ {
+ const auto in_ptr = reinterpret_cast<const uint8_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize(vin, scale[id.z()], offset[id.z()]);
+
+ store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ uint8_t val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<T>(dequantize(val, scale[id.z()], offset[id.z()]));
+ }
+ },
+ in, out);
+}
+
+template <typename T>
void run_dequantization_qsymm8(const ITensor *input, ITensor *output, const Window &window)
{
const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
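
The vectorized loop added above delegates to the library's vdequantize and store_result helpers. As a rough sketch of what a 16-lane QASYMM8 dequantize amounts to in raw NEON intrinsics, assuming the same (value - offset) * scale semantics; the function name is hypothetical and this is not the library's actual implementation:

#include <arm_neon.h>
#include <cstdint>

// Illustrative sketch: widen 16 uint8 lanes to float and apply (value - offset) * scale.
inline void dequantize16_neon(const uint8_t *src, float *dst, float scale, int32_t offset)
{
    const uint8x16_t  vin   = vld1q_u8(src);
    const int32x4_t   voffs = vdupq_n_s32(offset);
    const float32x4_t vscal = vdupq_n_f32(scale);

    // u8 -> u16 -> s32 for the four 4-lane groups
    const uint16x8_t lo = vmovl_u8(vget_low_u8(vin));
    const uint16x8_t hi = vmovl_u8(vget_high_u8(vin));

    const int32x4_t s32_0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(lo)));
    const int32x4_t s32_1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(lo)));
    const int32x4_t s32_2 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(hi)));
    const int32x4_t s32_3 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(hi)));

    // (value - offset) * scale, lane by lane
    vst1q_f32(dst + 0,  vmulq_f32(vcvtq_f32_s32(vsubq_s32(s32_0, voffs)), vscal));
    vst1q_f32(dst + 4,  vmulq_f32(vcvtq_f32_s32(vsubq_s32(s32_1, voffs)), vscal));
    vst1q_f32(dst + 8,  vmulq_f32(vcvtq_f32_s32(vsubq_s32(s32_2, voffs)), vscal));
    vst1q_f32(dst + 12, vmulq_f32(vcvtq_f32_s32(vsubq_s32(s32_3, voffs)), vscal));
}
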
@@ -251,6 +293,9 @@ void run_dequantization_core(const ITensor *input, ITensor *output, const Window
case DataType::QASYMM8:
run_dequantization_qasymm8<T>(input, output, window);
break;
+ case DataType::QASYMM8_PER_CHANNEL:
+ run_dequantization_qasymm8_per_channel<T>(input, output, window);
+ break;
case DataType::QSYMM8:
run_dequantization_qsymm8<T>(input, output, window);
break;
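
Finally, a hypothetical end-to-end sketch of the runtime function exercising this new path. The vector-based QuantizationInfo constructor, the tensor shapes, and the scale/offset values are assumptions for illustration and may differ across library versions.

#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

#include <cstdint>
#include <vector>

using namespace arm_compute;

int main()
{
    // Assumed: per-channel scale/offset vectors, one entry per channel (z dimension).
    const std::vector<float>   scale  = { 0.5f, 0.25f, 0.125f };
    const std::vector<int32_t> offset = { 10, 5, 0 };

    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::QASYMM8_PER_CHANNEL,
                                     QuantizationInfo(scale, offset)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));

    NEDequantizationLayer dequant;
    dequant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with quantized data ...
    dequant.run();
    return 0;
}
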