aboutsummaryrefslogtreecommitdiff
path: root/arm_compute
diff options
context:
space:
mode:
authorLuca Foschiani <luca.foschiani@arm.com>2020-01-28 10:38:07 +0000
committerLuca Foschiani <luca.foschiani@arm.com>2020-02-26 09:39:08 +0000
commitee939fb58e3fc50ae7c92c895f8abd1dd9f20eb3 (patch)
treefd199008e32f95390270010cf3d449745abae3b8 /arm_compute
parentca8c0f79a9fc1b3056c574bdd134748ea3cfd885 (diff)
downloadComputeLibrary-ee939fb58e3fc50ae7c92c895f8abd1dd9f20eb3.tar.gz
COMPMID-2774: Add support for QASYMM8_SIGNED in NEReductionOperation, NEReduceMean and NEArgMinMaxLayer
Signed-off-by: Luca Foschiani <luca.foschiani@arm.com>
Change-Id: Icf198a983c8ce2c6cd8451a1190bb99115eac3af
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2652
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/NEReductionOperationKernel.h |  6
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/cvt.h             | 61
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h      |  1
-rw-r--r--  arm_compute/core/Utils.h                                   |  6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h      |  6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEReduceMean.h          |  6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEReductionOperation.h  |  6
7 files changed, 80 insertions, 12 deletions
diff --git a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
index 36792abee3..28cca4987b 100644
--- a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,7 +59,7 @@ public:
/** Set the source, destination of the kernel
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW.
+ * @param[in] input Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input, S32 for ARG_MIN/ARG_MAX.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0
@@ -69,7 +69,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperationKernel.
*
- * @param[in] input Source tensor info. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW.
+ * @param[in] input Source tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW.
* @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input, S32 for ARG_MIN/ARG_MAX.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/cvt.h b/arm_compute/core/NEON/wrapper/intrinsics/cvt.h
new file mode 100644
index 0000000000..1f22e09a11
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/cvt.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CVT_H
+#define ARM_COMPUTE_WRAPPER_CVT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCVT_TO_F32_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, float32x4_t>::type \
+ vcvt(const vtype &a) \
+ { \
+ return prefix##_##postfix1##_##postfix2(a); \
+ }
+
+VCVT_TO_F32_IMPL(float32x4_t, uint32x4_t, vcvtq, f32, u32)
+VCVT_TO_F32_IMPL(float32x4_t, int32x4_t, vcvtq, f32, s32)
+#undef VCVT_TO_F32_IMPL
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+ return vcvtq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+ return vcvtq_s32_f32(a);
+}
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CVT_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index a7af352c76..51b1fcc1bd 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -33,6 +33,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/cgt.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/clt.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/combine.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/cvt.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/div.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/eor.h"
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 7ab78be908..4a3b01d21f 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -596,6 +596,12 @@ inline std::tuple<PixelValue, PixelValue> get_min_max(DataType dt)
max = PixelValue(std::numeric_limits<int32_t>::max());
break;
}
+ case DataType::F16:
+ {
+ min = PixelValue(std::numeric_limits<half>::lowest());
+ max = PixelValue(std::numeric_limits<half>::max());
+ break;
+ }
case DataType::F32:
{
min = PixelValue(std::numeric_limits<float>::lowest());
diff --git a/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h b/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h
index e4a7b94a7a..c50f358d1f 100644
--- a/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ public:
NEArgMinMaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] input Input source tensor. Data types supported: QASYMM8/S32/F16/F32.
+ * @param[in] input Input source tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/S32/F16/F32.
* @param[in] axis Axis to find max/min index.
* @param[out] output Output source tensor. Data types supported: U32/S32.
* @param[in] op Operation to perform: min or max
@@ -62,7 +62,7 @@ public:
void configure(ITensor *input, int axis, ITensor *output, const ReductionOperation &op);
/** Static function to check if given info will lead to a valid configuration of @ref NEArgMinMaxLayer
*
- * @param[in] input Input source tensor info. Data types supported: QASYMM8/S32/F16/F32.
+ * @param[in] input Input source tensor info. Data types supported: QASYMM8_SIGNED/QASYMM8/S32/F16/F32.
* @param[in] axis Axis to find max/min index.
* @param[in] output Output source tensor info. Data types supported: U32/S32.
* @param[in] op Operation to perform: min or max
diff --git a/arm_compute/runtime/NEON/functions/NEReduceMean.h b/arm_compute/runtime/NEON/functions/NEReduceMean.h
index 69804b1fd8..3c7cc21929 100644
--- a/arm_compute/runtime/NEON/functions/NEReduceMean.h
+++ b/arm_compute/runtime/NEON/functions/NEReduceMean.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32
+ * @param[in] input Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32
* @param[in] reduction_axis Reduction axis vector.
* @param[in] keep_dims If positive, retains reduced dimensions with length 1.
* @param[out] output Destination tensor. Data type supported: Same as @p input
@@ -54,7 +54,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEReduceMean
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32
+ * @param[in] input Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32
* @param[in] reduction_axis Reduction axis vector.
* @param[in] keep_dims If positive, retains reduced dimensions with length 1.
* @param[in] output Destination tensor. Data type supported: Same as @p input
diff --git a/arm_compute/runtime/NEON/functions/NEReductionOperation.h b/arm_compute/runtime/NEON/functions/NEReductionOperation.h
index 24142315f4..abda4159ba 100644
--- a/arm_compute/runtime/NEON/functions/NEReductionOperation.h
+++ b/arm_compute/runtime/NEON/functions/NEReductionOperation.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ public:
NEReductionOperation(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+ * @param[in] input Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
* @param[in] op Reduction operation to perform.
@@ -59,7 +59,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperation.
*
- * @param[in] input Source tensor info. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+ * @param[in] input Source tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
* @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
* @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
* @param[in] op Reduction operation to perform.