author    Georgios Pinitas <georgios.pinitas@arm.com>    2021-04-28 10:20:18 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>    2021-05-19 11:38:32 +0000
commit    11d8415aa57b69fb6c83e86a37e3026c22d1d37d (patch)
tree      8f6bb12011ddc7275a8cc071dbf8ffe90a88e8eb
parent    856f66e6c61b77d03f754cd0fa8439891f0e4aca (diff)
download  ComputeLibrary-11d8415aa57b69fb6c83e86a37e3026c22d1d37d.tar.gz
Port DepthConvert to new Api
- Renames DepthConvert to Cast
- Ports both NEDepthConvertLayer and CLDepthConvertLayer variants
- Removes legacy shift capability from DepthConvert, allowing only shifts of 0

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I806a0f8eb23d23502b632c529fda7edde19c8176
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5565
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
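For context (not part of the patch): after this port the Cast functions take only a conversion policy; the legacy shift argument no longer exists on the new API. A minimal usage sketch of the ported CPU function, with illustrative tensor shapes and data types:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/NEON/functions/NECast.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative 16x16 tensors: cast U8 source values to F32.
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::U8));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        NECast cast;
        cast.configure(&src, &dst, ConvertPolicy::SATURATE); // no shift parameter
        src.allocator()->allocate();
        dst.allocator()->allocate();
        cast.run();
        return 0;
    }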
-rw-r--r--  Android.bp | 6
-rw-r--r--  SConscript | 2
-rw-r--r--  arm_compute/runtime/CL/functions/CLCast.h | 28
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthConvertLayer.h | 28
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h | 11
-rw-r--r--  arm_compute/runtime/NEON/functions/NECast.h | 28
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h | 34
-rw-r--r--  docs/user_guide/release_version_and_change_log.dox | 6
-rw-r--r--  src/core/CL/CLKernels.h | 1
-rw-r--r--  src/core/CL/cl_kernels/cast.cl (renamed from src/core/CL/cl_kernels/depth_convert.cl) | 20
-rw-r--r--  src/core/CL/kernels/CLDepthConvertLayerKernel.h | 91
-rw-r--r--  src/core/NEON/NEKernels.h | 1
-rw-r--r--  src/core/NEON/kernels/NEDepthConvertLayerKernel.h | 96
-rw-r--r--  src/core/cpu/kernels/CpuCastKernel.cpp (renamed from src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp) | 831
-rw-r--r--  src/core/cpu/kernels/CpuCastKernel.h | 82
-rw-r--r--  src/core/gpu/cl/ClKernelLibrary.cpp | 8
-rw-r--r--  src/core/gpu/cl/kernels/ClCastKernel.cpp (renamed from src/core/CL/kernels/CLDepthConvertLayerKernel.cpp) | 119
-rw-r--r--  src/core/gpu/cl/kernels/ClCastKernel.h | 79
-rw-r--r--  src/graph/backends/CL/CLNodeValidator.cpp | 6
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp | 11
-rw-r--r--  src/runtime/CL/functions/CLCast.cpp | 41
-rw-r--r--  src/runtime/CL/functions/CLDepthConvertLayer.cpp | 45
-rw-r--r--  src/runtime/CL/functions/CLFullyConnectedLayer.cpp | 1
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 1
-rw-r--r--  src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp | 1
-rw-r--r--  src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp | 11
-rw-r--r--  src/runtime/CL/functions/CLLSTMLayer.cpp | 1
-rw-r--r--  src/runtime/CL/functions/CLLSTMLayerQuantized.cpp | 1
-rw-r--r--  src/runtime/CL/functions/CLQLSTMLayer.cpp | 1
-rw-r--r--  src/runtime/CL/functions/CLRNNLayer.cpp | 1
-rw-r--r--  src/runtime/NEON/functions/NECast.cpp | 42
-rw-r--r--  src/runtime/NEON/functions/NEDepthConvertLayer.cpp | 45
-rw-r--r--  src/runtime/cpu/operators/CpuCast.cpp | 44
-rw-r--r--  src/runtime/cpu/operators/CpuCast.h | 73
-rw-r--r--  src/runtime/gpu/cl/operators/ClCast.cpp | 45
-rw-r--r--  src/runtime/gpu/cl/operators/ClCast.h | 74
-rw-r--r--  tests/validation/CL/DepthConvertLayer.cpp | 53
-rw-r--r--  tests/validation/CL/UNIT/WeightsRetention.cpp | 1
-rw-r--r--  tests/validation/NEON/DepthConvertLayer.cpp | 46
39 files changed, 1181 insertions, 834 deletions
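One behavioural note before the patch body (an inference from the commit message, not something shown verbatim in the hunks below): the legacy DepthConvert functions keep their shift parameter for source compatibility, but only a shift of 0 is accepted after this change. A small validation sketch under that assumption:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"

    using namespace arm_compute;

    void check_shift_support()
    {
        const TensorInfo src(TensorShape(32U), 1, DataType::U8);
        const TensorInfo dst(TensorShape(32U), 1, DataType::S16);

        // shift == 0 remains a valid configuration ...
        const Status ok = NEDepthConvertLayer::validate(&src, &dst, ConvertPolicy::SATURATE, 0);
        // ... while any non-zero shift is expected to be rejected (assumption based on
        // the commit message; the exact error status/text is not reproduced here).
        const Status rejected = NEDepthConvertLayer::validate(&src, &dst, ConvertPolicy::SATURATE, 1);
        (void)ok;
        (void)rejected;
    }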
diff --git a/Android.bp b/Android.bp
index 4be2bfd55d..c4438f9e76 100644
--- a/Android.bp
+++ b/Android.bp
@@ -85,7 +85,6 @@ cc_library_static {
"src/core/CL/kernels/CLComparisonKernel.cpp",
"src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp",
"src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp",
- "src/core/CL/kernels/CLDepthConvertLayerKernel.cpp",
"src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp",
"src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp",
"src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp",
@@ -159,7 +158,6 @@ cc_library_static {
"src/core/NEON/kernels/NECol2ImKernel.cpp",
"src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.cpp",
"src/core/NEON/kernels/NECropKernel.cpp",
- "src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp",
"src/core/NEON/kernels/NEDepthToSpaceLayerKernel.cpp",
"src/core/NEON/kernels/NEFFTDigitReverseKernel.cpp",
"src/core/NEON/kernels/NEFFTRadixStageKernel.cpp",
@@ -278,6 +276,7 @@ cc_library_static {
"src/core/Version.cpp",
"src/core/cpu/kernels/CpuActivationKernel.cpp",
"src/core/cpu/kernels/CpuAddKernel.cpp",
+ "src/core/cpu/kernels/CpuCastKernel.cpp",
"src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp",
"src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp",
"src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp",
@@ -343,6 +342,7 @@ cc_library_static {
"src/core/gpu/cl/ClKernelLibrary.cpp",
"src/core/gpu/cl/kernels/ClActivationKernel.cpp",
"src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp",
+ "src/core/gpu/cl/kernels/ClCastKernel.cpp",
"src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp",
"src/core/gpu/cl/kernels/ClCopyKernel.cpp",
"src/core/gpu/cl/kernels/ClCropKernel.cpp",
@@ -631,6 +631,7 @@ cc_library_static {
"src/runtime/Utils.cpp",
"src/runtime/cpu/operators/CpuActivation.cpp",
"src/runtime/cpu/operators/CpuAdd.cpp",
+ "src/runtime/cpu/operators/CpuCast.cpp",
"src/runtime/cpu/operators/CpuConcatenate.cpp",
"src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.cpp",
"src/runtime/cpu/operators/CpuCopy.cpp",
@@ -655,6 +656,7 @@ cc_library_static {
"src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp",
"src/runtime/gpu/cl/operators/ClActivation.cpp",
"src/runtime/gpu/cl/operators/ClAdd.cpp",
+ "src/runtime/gpu/cl/operators/ClCast.cpp",
"src/runtime/gpu/cl/operators/ClConcatenate.cpp",
"src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.cpp",
"src/runtime/gpu/cl/operators/ClCopy.cpp",
diff --git a/SConscript b/SConscript
index a009d1f007..e5f7a8a938 100644
--- a/SConscript
+++ b/SConscript
@@ -299,6 +299,7 @@ if env['neon']:
runtime_files += Glob('src/runtime/NEON/functions/assembly/*.cpp')
cpu_kernel_hp_files = ['src/core/cpu/kernels/CpuActivationKernel.cpp',
+ 'src/core/cpu/kernels/CpuCastKernel.cpp',
'src/core/cpu/kernels/CpuDepthwiseConvolutionNativeKernel.cpp',
'src/core/cpu/kernels/CpuDirectConvolutionKernel.cpp',
'src/core/cpu/kernels/CpuDirectConvolutionOutputStageKernel.cpp',
@@ -350,6 +351,7 @@ if env['neon']:
'src/cpu/CpuTensor.cpp'
]
cpu_operator_hp_files = ['src/runtime/cpu/operators/CpuActivation.cpp',
+ 'src/runtime/cpu/operators/CpuCast.cpp',
'src/runtime/cpu/operators/CpuDepthwiseConvolution.cpp',
'src/runtime/cpu/operators/CpuDepthwiseConvolutionAssemblyDispatch.cpp',
'src/runtime/cpu/operators/CpuDirectConvolution.cpp',
diff --git a/arm_compute/runtime/CL/functions/CLCast.h b/arm_compute/runtime/CL/functions/CLCast.h
index 6e4cf62547..d2cea7a8a2 100644
--- a/arm_compute/runtime/CL/functions/CLCast.h
+++ b/arm_compute/runtime/CL/functions/CLCast.h
@@ -24,10 +24,11 @@
#ifndef ARM_COMPUTE_CLCAST_H
#define ARM_COMPUTE_CLCAST_H
+#include "arm_compute/runtime/IFunction.h"
+
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-#include <cstdint>
+#include <memory>
namespace arm_compute
{
@@ -35,10 +36,22 @@ class CLCompileContext;
class ICLTensor;
class ITensorInfo;
-/** Basic function to run @ref CLDepthConvertLayerKernel. */
-class CLCast : public ICLSimpleFunction
+/** Basic function to run @ref opencl::kernels::ClCastKernel */
+class CLCast : public IFunction
{
public:
+ /** Constructor */
+ CLCast();
+ /** Destructor */
+ ~CLCast();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLCast(const CLCast &) = delete;
+ /** Default move constructor */
+ CLCast(CLCast &&);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLCast &operator=(const CLCast &) = delete;
+ /** Default move assignment operator */
+ CLCast &operator=(CLCast &&);
/** Initialize the function's source, destination
*
* Valid data layouts:
@@ -91,6 +104,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLCAST_H*/
diff --git a/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h b/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
index 34dfdd7f3a..58deb7ec40 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
@@ -24,10 +24,11 @@
#ifndef ARM_COMPUTE_CLDEPTHCONVERT_H
#define ARM_COMPUTE_CLDEPTHCONVERT_H
+#include "arm_compute/runtime/IFunction.h"
+
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-#include <cstdint>
+#include <memory>
namespace arm_compute
{
@@ -35,10 +36,22 @@ class CLCompileContext;
class ICLTensor;
class ITensorInfo;
-/** Basic function to run @ref CLDepthConvertLayerKernel. */
-class CLDepthConvertLayer : public ICLSimpleFunction
+/** Basic function to run @ref opencl::kernels::ClCastKernel */
+class CLDepthConvertLayer : public IFunction
{
public:
+ /** Constructor */
+ CLDepthConvertLayer();
+ /** Destructor */
+ ~CLDepthConvertLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthConvertLayer(const CLDepthConvertLayer &) = delete;
+ /** Default move constructor */
+ CLDepthConvertLayer(CLDepthConvertLayer &&);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthConvertLayer &operator=(const CLDepthConvertLayer &) = delete;
+ /** Default move assignment operator */
+ CLDepthConvertLayer &operator=(CLDepthConvertLayer &&);
/** Initialize the function's source, destination
*
* Valid data layouts:
@@ -94,6 +107,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLDEPTHCONVERT_H*/
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index e5de45c34f..3d2dbdb104 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -34,7 +34,6 @@ class CLCompileContext;
class IMemoryManager;
class ICLTensor;
class ITensorInfo;
-class CLDepthConvertLayerKernel;
class CLGEMMLowpMatrixMultiplyNativeKernel;
class CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel;
class CLGEMMLowpOffsetContributionKernel;
@@ -49,6 +48,14 @@ class ClGemmReshapeRhsMatrixKernel;
} // namespace kernels
} // namespace opencl
+namespace opencl
+{
+namespace kernels
+{
+class ClCastKernel;
+} // namespace kernels
+} // namespace opencl
+
/** Basic function to execute GEMMLowpMatrixMultiplyCore on OpenCL. */
class CLGEMMLowpMatrixMultiplyCore : public IFunction
{
@@ -143,7 +150,7 @@ private:
MemoryGroup _memory_group;
// Kernels used
- std::unique_ptr<CLDepthConvertLayerKernel> _weights_to_qasymm8;
+ std::unique_ptr<opencl::kernels::ClCastKernel> _weights_to_qasymm8;
std::unique_ptr<CLGEMMLowpMatrixMultiplyNativeKernel> _mm_native_kernel;
std::unique_ptr<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel> _mm_reshaped_only_rhs_kernel;
std::unique_ptr<opencl::kernels::ClGemmReshapeRhsMatrixKernel> _mtx_b_reshape_kernel;
diff --git a/arm_compute/runtime/NEON/functions/NECast.h b/arm_compute/runtime/NEON/functions/NECast.h
index eb7de1fadb..30499f5ecf 100644
--- a/arm_compute/runtime/NEON/functions/NECast.h
+++ b/arm_compute/runtime/NEON/functions/NECast.h
@@ -24,20 +24,35 @@
#ifndef ARM_COMPUTE_NECAST_H
#define ARM_COMPUTE_NECAST_H
+#include "arm_compute/runtime/IFunction.h"
+
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
+
+#include <memory>
namespace arm_compute
{
class ITensor;
class ITensorInfo;
-/** Basic function to run @ref NEDepthConvertLayerKernel.
+/** Basic function to run @ref cpu::kernels::CpuCastKernel.
* This function ignores the scale and zeroPoint of quanized tensors,so QASYMM8 input is treated as uint8 values.
*/
-class NECast : public INESimpleFunctionNoBorder
+class NECast : public IFunction
{
public:
+ /** Constructor */
+ NECast();
+ /** Destructor */
+ ~NECast();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NECast(const NECast &) = delete;
+ /** Default move constructor */
+ NECast(NECast &&);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NECast &operator=(const NECast &) = delete;
+ /** Default move assignment operator */
+ NECast &operator=(NECast &&);
/** Initialize the function's source, destination
*
* Valid data layouts:
@@ -71,6 +86,13 @@ public:
* @return a status
*/
static Status validate(ITensorInfo *input, ITensorInfo *output, ConvertPolicy policy);
+
+ // Inherited methods overridden
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_NECAST_H*/
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
index 17cf539717..eb0724ae12 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
@@ -24,28 +24,33 @@
#ifndef ARM_COMPUTE_NEDEPTHCONVERT_H
#define ARM_COMPUTE_NEDEPTHCONVERT_H
+#include "arm_compute/runtime/IFunction.h"
+
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
-#include <cstdint>
+#include <memory>
namespace arm_compute
{
class ITensor;
class ITensorInfo;
-/**Basic function to run @ref NEDepthConvertLayerKernel */
-class NEDepthConvertLayer : public INESimpleFunctionNoBorder
+/**Basic function to run @ref cpu::kernels::CpuCastKernel */
+class NEDepthConvertLayer : public IFunction
{
public:
- /* Contructor */
- NEDepthConvertLayer() = default;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ /** Constructor */
+ NEDepthConvertLayer();
+ /** Destructor */
+ ~NEDepthConvertLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
NEDepthConvertLayer(const NEDepthConvertLayer &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- const NEDepthConvertLayer &operator=(const NEDepthConvertLayer &) = delete;
- /** Default destructor */
- ~NEDepthConvertLayer() = default;
+ /** Default move constructor */
+ NEDepthConvertLayer(NEDepthConvertLayer &&);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEDepthConvertLayer &operator=(const NEDepthConvertLayer &) = delete;
+ /** Default move assignment operator */
+ NEDepthConvertLayer &operator=(NEDepthConvertLayer &&);
/** Initialize the function's source, destination
*
* Valid data layouts:
@@ -80,6 +85,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift = 0);
+
+ // Inherited methods overridden
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_NEDEPTHCONVERT_H*/
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index a975e8b35e..557eff0779 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -269,7 +269,7 @@ v20.11 Public major release
- @ref CLGEMMLowpMatrixMultiplyReshapedKernel
- @ref CLFuseBatchNormalizationKernel
- @ref CLDepthwiseConvolutionLayerNativeKernel
- - @ref CLDepthConvertLayerKernel
+ - CLDepthConvertLayerKernel
- CLCopyKernel
- @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
- CLActivationLayerKernel
@@ -549,7 +549,7 @@ v20.08 Public major release
- @ref NENormalizationLayerKernel
- @ref NEL2NormalizeLayerKernel
- NEFillArrayKernel
- - @ref NEDepthConvertLayerKernel
+ - NEDepthConvertLayerKernel
- @ref NERangeKernel
- @ref NEPriorBoxLayer
- Removed OpenCL kernels / functions:
@@ -587,7 +587,7 @@ v20.05 Public major release
- @ref NEConvolutionLayerReshapeWeights
- @ref NEIm2ColKernel
- NEIm2Col
- - @ref NEDepthConvertLayerKernel
+ - NEDepthConvertLayerKernel
- @ref NEDepthConvertLayer
- @ref NEGEMMConvolutionLayer
- NEGEMMAssemblyDispatch
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 1302d52180..c59eebacbb 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -35,7 +35,6 @@
#include "src/core/CL/kernels/CLComparisonKernel.h"
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
diff --git a/src/core/CL/cl_kernels/depth_convert.cl b/src/core/CL/cl_kernels/cast.cl
index a888d7b9bc..036a683ec7 100644
--- a/src/core/CL/cl_kernels/depth_convert.cl
+++ b/src/core/CL/cl_kernels/cast.cl
@@ -31,7 +31,7 @@
#define CONVERT_UP(x, type) CONVERT(x, type)
-/** This function performs a down-scaling depth conversion.
+/** This function performs a down-casting
*
* @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is user's responsibility to keep track of the quantization info.
*
@@ -56,12 +56,10 @@
* @param[in] out_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] shift The integer shift amount value. Supported data types: S32
*/
-__kernel void convert_depth_down(
+__kernel void cast_down(
TENSOR3D_DECLARATION(in),
- TENSOR3D_DECLARATION(out),
- const int shift)
+ TENSOR3D_DECLARATION(out))
{
int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -82,12 +80,12 @@ __kernel void convert_depth_down(
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#else /* defined(IS_DATA_TYPE_FLOAT) */
VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
- res0 = CONVERT_DOWN(in_data >> shift, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+ res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif /* defined(IS_DATA_TYPE_FLOAT) */
}
-/** This function performs a up-scaling depth conversion.
+/** This function performs a up-casting
*
* @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
* e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
@@ -110,12 +108,10 @@ __kernel void convert_depth_down(
* @param[in] out_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] shift The integer shift amount value. Supported data types: S32
*/
-__kernel void convert_depth_up(
+__kernel void cast_up(
TENSOR3D_DECLARATION(in),
- TENSOR3D_DECLARATION(out),
- const int shift)
+ TENSOR3D_DECLARATION(out))
{
int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -132,7 +128,7 @@ __kernel void convert_depth_up(
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#else /* defined(IS_DATA_TYPE_FLOAT) */
VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
- res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)) << shift;
+ res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif /* defined(IS_DATA_TYPE_FLOAT) */
}
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.h b/src/core/CL/kernels/CLDepthConvertLayerKernel.h
deleted file mode 100644
index 8b511c6707..0000000000
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLSimple3DKernel.h"
-
-#include <cstdint>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the depth conversion kernel. */
-class CLDepthConvertLayerKernel : public ICLSimple3DKernel
-{
-public:
- /** Set the input and output of the kernel.
- *
- * Valid conversions Input -> Output :
- *
- * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
- * - U8 -> S8, U16, S16, U32, S32, F16, F32
- * - U16 -> U8, S8, S16, U32, S32, F16, F32
- * - S16 -> U8, S8, U16, U32, S32, F16, F32
- * - U32 -> U8, S8, U16, S16, S32, F16, F32
- * - S32 -> U8, S8, U16, S16, U32, F16, F32
- * - F16 -> U8, S8, U16, S16, U32, F32
- * - F32 -> U8, S8, U16, S16, U32, F16
- *
- * @param[in] input The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- */
- void configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
- /** Set the input and output of the kernel.
- *
- * Valid conversions Input -> Output :
- *
- * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
- * - U8 -> S8, U16, S16, U32, S32, F16, F32
- * - U16 -> U8, S8, S16, U32, S32, F16, F32
- * - S16 -> U8, S8, U16, U32, S32, F16, F32
- * - U32 -> U8, S8, U16, S16, S32, F16, F32
- * - S32 -> U8, S8, U16, S16, U32, F16, F32
- * - F16 -> U8, S8, U16, S16, U32, F32
- * - F32 -> U8, S8, U16, S16, U32, F16
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel
- *
- * @param[in] input Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[in] output Destination tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift);
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index b11e135a0d..ea15f4eddd 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -35,7 +35,6 @@
#include "src/core/NEON/kernels/NECol2ImKernel.h"
#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
#include "src/core/NEON/kernels/NECropKernel.h"
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
#include "src/core/NEON/kernels/NEDepthToSpaceLayerKernel.h"
#include "src/core/NEON/kernels/NEFFTDigitReverseKernel.h"
#include "src/core/NEON/kernels/NEFFTRadixStageKernel.h"
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h b/src/core/NEON/kernels/NEDepthConvertLayerKernel.h
deleted file mode 100644
index 30fe1ed2e6..0000000000
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_DEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_DEPTHCONVERTKERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Depth conversion kernel
- * This function ignores the scale and zeroPoint of quanized tensors, i.e. QASYMM8 input is treated as uint8 values.
- */
-class NEDepthConvertLayerKernel : public INEKernel
-{
-public:
- const char *name() const override
- {
- return "NEDepthConvertLayerKernel";
- }
- /** Default constructor*/
- NEDepthConvertLayerKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthConvertLayerKernel(const NEDepthConvertLayerKernel &) = delete;
- /** Default move constructor */
- NEDepthConvertLayerKernel(NEDepthConvertLayerKernel &&) = default;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthConvertLayerKernel &operator=(const NEDepthConvertLayerKernel &) = delete;
- /** Default move assignment operator */
- NEDepthConvertLayerKernel &operator=(NEDepthConvertLayerKernel &&) = default;
- /** Default destructor */
- ~NEDepthConvertLayerKernel() = default;
- /** Set the input and output of the kernel
- *
- * Valid conversions Input -> Output :
- *
- * - QASYMM8_SIGNED -> S16, S32, F32, F16
- * - QASYMM8 -> U16, S16, S32, F32, F16
- * - U8 -> U16, S16, S32, F32, F16
- * - U16 -> U8, U32
- * - S16 -> QASYMM8_SIGNED, U8, S32
- * - BFLOAT16 -> F32
- * - F16 -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8
- * - S32 -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8
- * - F32 -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
- *
- * @param[in] input The input tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
- * @param[out] output The output tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
- * @param[in] policy Conversion policy.
- * @param[in] shift (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
- */
- void configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
- /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConvertLayerKernel
- *
- * @param[in] input Source tensor info. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
- * @param[in] output Destination tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift = 0);
-
- // Inherited methods overridden:
- void run(const Window &window, const ThreadInfo &info) override;
-
-private:
- const ITensor *_input;
- ITensor *_output;
- ConvertPolicy _policy;
- uint32_t _shift;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEDEPTHCONVERTKERNEL_H */
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp b/src/core/cpu/kernels/CpuCastKernel.cpp
index 4b5208eeb6..46f3c330ef 100644
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
+++ b/src/core/cpu/kernels/CpuCastKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
+#include "src/core/cpu/kernels/CpuCastKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -36,161 +36,159 @@
#include "src/core/helpers/WindowHelpers.h"
#include "support/SaturateCast.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
{
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(output);
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(output);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(dst);
ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON(input == output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
DataType::F32, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
DataType::U32, DataType::S32, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8_SIGNED && (output->data_type() != DataType::S16 && output->data_type() != DataType::S32
- && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8_SIGNED && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::S32
+ && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
"Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16
- && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16
+ && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
"Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16
- && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16
+ && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
"Only data_types supported [in] U8 -> [out] U16, S16, S32, F16, F32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U16 && (output->data_type() != DataType::U8 && output->data_type() != DataType::U32),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U16 && (dst->data_type() != DataType::U8 && dst->data_type() != DataType::U32),
"Only data_types supported [in] U16 -> [out] U8, U32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::U8 && output->data_type() != DataType::S32),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::U8 && dst->data_type() != DataType::S32),
"Only data_types supported [in] S16 -> [out] U8, S32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::BFLOAT16 && output->data_type() != DataType::F32,
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::BFLOAT16 && dst->data_type() != DataType::F32,
"Only data_types supported [in] BFLOAT16 -> [out] F32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
- && output->data_type() != DataType::U8
- && output->data_type() != DataType::F32 && output->data_type() != DataType::S32),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
+ && dst->data_type() != DataType::U8
+ && dst->data_type() != DataType::F32 && dst->data_type() != DataType::S32),
"Only data_types supported [in] F16 -> [out] QASYMM8, F32, S32, U8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
- && output->data_type() != DataType::F16 && output->data_type() != DataType::BFLOAT16
- && output->data_type() != DataType::S32 && output->data_type() != DataType::U8),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
+ && dst->data_type() != DataType::F16 && dst->data_type() != DataType::BFLOAT16
+ && dst->data_type() != DataType::S32 && dst->data_type() != DataType::U8),
"Only data_types supported [in] F32 -> [out] QASYMM8, BFLOAT16, F16, S32, U8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
- && output->data_type() != DataType::F16
- && output->data_type() != DataType::F32 && output->data_type() != DataType::U8),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
+ && dst->data_type() != DataType::F16
+ && dst->data_type() != DataType::F32 && dst->data_type() != DataType::U8),
"Only data_types supported [in] S32 -> [out] QASYMM8, F16, F32, U8");
- // Validate in case of configured output
- if(output->total_size() > 0)
+ // Validate in case of configured dst
+ if(dst->total_size() > 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
}
return Status{};
}
} // namespace
-NEDepthConvertLayerKernel::NEDepthConvertLayerKernel()
- : _input(nullptr), _output(nullptr), _policy(), _shift(0)
+void CpuCastKernel::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
{
-}
-
-void NEDepthConvertLayerKernel::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
- set_shape_if_empty(*output->info(), input->info()->tensor_shape());
+ // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given)
+ set_shape_if_empty(*dst, src->tensor_shape());
- _input = input;
- _output = output;
_policy = policy;
- _shift = shift;
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*src, Steps());
ICPPKernel::configure(win);
}
-Status NEDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
+Status CpuCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy));
return Status{};
}
-void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info)
+void CpuCastKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
- ARM_COMPUTE_ERROR_ON_NULLPTR(_input, _output);
- ARM_COMPUTE_ERROR_ON(_input == _output);
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
const int window_step_x = 16;
+ const ITensor *_src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ ITensor *_dst = tensors.get_tensor(TensorType::ACL_DST);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+ ARM_COMPUTE_ERROR_ON(_src == _dst);
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+
Window win{ window };
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- Iterator input(_input, win);
- Iterator output(_output, win);
+ Iterator src(_src, win);
+ Iterator dst(_dst, win);
- switch(_input->info()->data_type())
+ switch(_src->info()->data_type())
{
case DataType::QASYMM8_SIGNED:
{
- const int16x8_t b = vdupq_n_s16(_shift);
-
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
case DataType::S16:
{
/* Up-conversion QASYMM8_SIGNED -> S16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
- int x = window_start_x;
+ const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int16_t *>(dst.ptr());
+ int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+ const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
- vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+ vmovl_s8(vget_low_s8(texels_s8)),
+ vmovl_s8(vget_high_s8(texels_s8))
}
};
- vst1q_s16(output_ptr + x, texels.val[0]);
- vst1q_s16(output_ptr + x + 8, texels.val[1]);
+ vst1q_s16(dst_ptr + x, texels.val[0]);
+ vst1q_s16(dst_ptr + x + 8, texels.val[1]);
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int16_t>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<int16_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::S32:
@@ -198,35 +196,35 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion QASYMM8_SIGNED -> S32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
- int x = window_start_x;
+ const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+ int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+ const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
- vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+ vmovl_s8(vget_low_s8(texels_s8)),
+ vmovl_s8(vget_high_s8(texels_s8))
}
};
- vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
- vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
- vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
- vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+ vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+ vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+ vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+ vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::F32:
@@ -234,34 +232,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion QASYMM8_SIGNED -> F32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
+ const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(src.ptr()));
const int16x8x2_t texels =
{
{
- vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
- vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+ vmovl_s8(vget_low_s8(texels_s8)),
+ vmovl_s8(vget_high_s8(texels_s8))
}
};
- vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
- vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
- vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
- vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+ vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+ vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+ vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+ vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -270,38 +268,38 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion QASYMM8_SIGNED -> F16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
- int x = window_start_x;
+ const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
+ int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+ const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
- vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+ vmovl_s8(vget_low_s8(texels_s8)),
+ vmovl_s8(vget_high_s8(texels_s8))
}
};
- vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
- vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+ vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0]));
+ vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
}
@@ -309,43 +307,40 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
case DataType::QASYMM8:
case DataType::U8:
{
- const int16x8_t b = vdupq_n_s16(_shift);
-
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
case DataType::S16:
{
/* Up-conversion U8 -> S16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int16_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+ const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+ vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+ vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
}
};
- vst1q_s16(output_ptr + x, texels.val[0]);
- vst1q_s16(output_ptr + x + 8, texels.val[1]);
+ vst1q_s16(dst_ptr + x, texels.val[0]);
+ vst1q_s16(dst_ptr + x + 8, texels.val[1]);
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- auto in = static_cast<int32_t>(*(input_ptr + x));
- *(output_ptr + x) = in << _shift;
+ *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::S32:
@@ -353,36 +348,35 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion U8 -> S32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+ const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+ vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+ vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
}
};
- vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
- vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
- vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
- vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+ vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+ vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+ vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+ vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- auto in = static_cast<uint32_t>(*(input_ptr + x));
- *(output_ptr + x) = in << _shift;
+ *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::F32:
@@ -390,35 +384,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion U8 -> F32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+ const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+ vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+ vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
}
};
- vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
- vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
- vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
- vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+ vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+ vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+ vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+ vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- auto in = static_cast<uint32_t>(*(input_ptr + x));
- *(output_ptr + x) = static_cast<float>(in << _shift);
+ *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -427,32 +420,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion U8 -> F16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+ const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
const int16x8x2_t texels =
{
{
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
- vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+ vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+ vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
}
};
- vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
- vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+ vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0]));
+ vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -461,55 +454,53 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Up-conversion U8 -> U16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint16_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint16_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+ const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
const uint16x8x2_t texels =
{
{
- vshlq_u16(vmovl_u8(vget_low_u8(texels_u8)), b),
- vshlq_u16(vmovl_u8(vget_high_u8(texels_u8)), b)
+ vmovl_u8(vget_low_u8(texels_u8)),
+ vmovl_u8(vget_high_u8(texels_u8))
}
};
- vst1q_u16(output_ptr + x, texels.val[0]);
- vst1q_u16(output_ptr + x + 8, texels.val[1]);
+ vst1q_u16(dst_ptr + x, texels.val[0]);
+ vst1q_u16(dst_ptr + x + 8, texels.val[1]);
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<uint16_t>(*(input_ptr + x)) << _shift;
+ *(dst_ptr + x) = static_cast<uint16_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
}
case DataType::S16:
{
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
case DataType::QASYMM8_SIGNED:
{
- const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
/* Down-conversion S16 -> QASYMM8_SIGNED */
if(ConvertPolicy::SATURATE == _policy)
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -517,28 +508,28 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int16x8x2_t texels =
{
{
- vqshlq_s16(vld1q_s16(input_ptr + x), b),
- vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+ vld1q_s16(src_ptr + x),
+ vld1q_s16(src_ptr + x + 8)
}
};
- vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
+ vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
else
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -546,35 +537,33 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int16x8x2_t texels =
{
{
- vshlq_s16(vld1q_s16(input_ptr + x), b),
- vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+ vld1q_s16(src_ptr + x),
+ vld1q_s16(src_ptr + x + 8)
}
};
- vst1q_s8(output_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
+ vst1q_s8(dst_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = static_cast<int8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
break;
}
case DataType::U8:
{
- const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
/* Down-conversion S16 -> U8 */
if(ConvertPolicy::SATURATE == _policy)
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -582,28 +571,28 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int16x8x2_t texels =
{
{
- vqshlq_s16(vld1q_s16(input_ptr + x), b),
- vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+ vld1q_s16(src_ptr + x),
+ vld1q_s16(src_ptr + x + 8)
}
};
- vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
+ vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
else
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -611,34 +600,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int16x8x2_t texels =
{
{
- vshlq_s16(vld1q_s16(input_ptr + x), b),
- vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+ vld1q_s16(src_ptr + x),
+ vld1q_s16(src_ptr + x + 8)
}
};
- vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
- vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
+ vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
+ vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
break;
}
case DataType::S32:
{
- const int32x4_t b = vdupq_n_s32(_shift);
-
/* Up-conversion S16 -> S32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -646,56 +633,54 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int16x8x2_t texels =
{
{
- vld1q_s16(input_ptr + x),
- vld1q_s16(input_ptr + x + 8)
+ vld1q_s16(src_ptr + x),
+ vld1q_s16(src_ptr + x + 8)
}
};
const int32x4x4_t texels_s32 =
{
{
- vshlq_s32(vmovl_s16(vget_low_s16(texels.val[0])), b),
- vshlq_s32(vmovl_s16(vget_high_s16(texels.val[0])), b),
- vshlq_s32(vmovl_s16(vget_low_s16(texels.val[1])), b),
- vshlq_s32(vmovl_s16(vget_high_s16(texels.val[1])), b)
+ vmovl_s16(vget_low_s16(texels.val[0])),
+ vmovl_s16(vget_high_s16(texels.val[0])),
+ vmovl_s16(vget_low_s16(texels.val[1])),
+ vmovl_s16(vget_high_s16(texels.val[1]))
}
};
- vst1q_s32(output_ptr + x, texels_s32.val[0]);
- vst1q_s32(output_ptr + x + 4, texels_s32.val[1]);
- vst1q_s32(output_ptr + x + 8, texels_s32.val[2]);
- vst1q_s32(output_ptr + x + 12, texels_s32.val[3]);
+ vst1q_s32(dst_ptr + x, texels_s32.val[0]);
+ vst1q_s32(dst_ptr + x + 4, texels_s32.val[1]);
+ vst1q_s32(dst_ptr + x + 8, texels_s32.val[2]);
+ vst1q_s32(dst_ptr + x + 12, texels_s32.val[3]);
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
}
case DataType::U16:
{
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
case DataType::U8:
{
- const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
/* Down-conversion U16 -> U8 */
if(ConvertPolicy::SATURATE == _policy)
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -703,28 +688,28 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const uint16x8x2_t texels =
{
{
- vqshlq_u16(vld1q_u16(input_ptr + x), b),
- vqshlq_u16(vld1q_u16(input_ptr + x + 8), b)
+ vld1q_u16(src_ptr + x),
+ vld1q_u16(src_ptr + x + 8)
}
};
- vst1q_u8(output_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
+ vst1q_u8(dst_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
else
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -732,34 +717,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const uint16x8x2_t texels =
{
{
- vshlq_u16(vld1q_u16(input_ptr + x), b),
- vshlq_u16(vld1q_u16(input_ptr + x + 8), b)
+ vld1q_u16(src_ptr + x),
+ vld1q_u16(src_ptr + x + 8)
}
};
- vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
+ vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
break;
}
case DataType::U32:
{
- const int32x4_t b = vdupq_n_s32(_shift);
-
/* Up-conversion U16 -> U32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const uint16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint32_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint32_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -767,42 +750,42 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const uint16x8x2_t texels =
{
{
- vld1q_u16(input_ptr + x),
- vld1q_u16(input_ptr + x + 8)
+ vld1q_u16(src_ptr + x),
+ vld1q_u16(src_ptr + x + 8)
}
};
- vst1q_u32(output_ptr + x, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[0])), b));
- vst1q_u32(output_ptr + x + 4, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[0])), b));
- vst1q_u32(output_ptr + x + 8, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[1])), b));
- vst1q_u32(output_ptr + x + 12, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[1])), b));
+ vst1q_u32(dst_ptr + x, vmovl_u16(vget_low_u16(texels.val[0])));
+ vst1q_u32(dst_ptr + x + 4, vmovl_u16(vget_high_u16(texels.val[0])));
+ vst1q_u32(dst_ptr + x + 8, vmovl_u16(vget_low_u16(texels.val[1])));
+ vst1q_u32(dst_ptr + x + 12, vmovl_u16(vget_high_u16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<uint32_t>(*(input_ptr + x) << _shift);
+ *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
}
#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
case DataType::BFLOAT16:
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
case DataType::F32:
{
/* Up-conversion BFLOAT16 -> F32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const bfloat16 *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const bfloat16 *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -810,48 +793,45 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const uint16x8x2_t texels =
{
{
- vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr())),
- vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr()) + 8)
+ vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr())),
+ vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr()) + 8)
}
};
- vst1q_f32(reinterpret_cast<float *>(output.ptr()),
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()),
vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16)));
- vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4,
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 4,
vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16)));
- vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8,
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 8,
vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16)));
- vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12,
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 12,
vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16)));
}
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = float(*(input_ptr + x));
+ *(dst_ptr + x) = float(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type unsupported");
+ ARM_COMPUTE_ERROR("dst data type unsupported");
}
break;
#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
case DataType::QASYMM8_SIGNED:
{
- const float16_t scale_s = 1 << _shift;
- const float16x8_t scale = vdupq_n_f16(scale_s);
-
/* Down-conversion F16 -> QASYMM8_SIGNED (Always saturating) */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -859,34 +839,31 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float16x8x2_t texels =
{
{
- vmulq_f16(vld1q_f16(input_ptr + x), scale),
- vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
+ vld1q_f16(src_ptr + x),
+ vld1q_f16(src_ptr + x + 8),
}
};
- vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
+ vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- const float16_t scale_s = 1 << _shift;
- const float16x8_t scale = vdupq_n_f16(scale_s);
-
/* Down-conversion F16 -> QASYMM8/U8 (Always saturating) */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -894,34 +871,31 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float16x8x2_t texels =
{
{
- vmulq_f16(vld1q_f16(input_ptr + x), scale),
- vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
+ vld1q_f16(src_ptr + x),
+ vld1q_f16(src_ptr + x + 8),
}
};
- vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
+ vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::F32:
{
- const float scale_s = 1 << _shift;
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Up-conversion F16 -> F32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -929,35 +903,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float16x8x2_t texels =
{
{
- vld1q_f16(input_ptr + x),
- vld1q_f16(input_ptr + x + 8)
+ vld1q_f16(src_ptr + x),
+ vld1q_f16(src_ptr + x + 8)
}
};
- vst1q_f32(output_ptr + x, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale));
- vst1q_f32(output_ptr + x + 4, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale));
- vst1q_f32(output_ptr + x + 8, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale));
- vst1q_f32(output_ptr + x + 12, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale));
+ vst1q_f32(dst_ptr + x, vcvt_f32_f16(vget_low_f16(texels.val[0])));
+ vst1q_f32(dst_ptr + x + 4, vcvt_f32_f16(vget_high_f16(texels.val[0])));
+ vst1q_f32(dst_ptr + x + 8, vcvt_f32_f16(vget_low_f16(texels.val[1])));
+ vst1q_f32(dst_ptr + x + 12, vcvt_f32_f16(vget_high_f16(texels.val[1])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::S32:
{
- const float scale_s = 1 << _shift;
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Up-conversion F16 -> S32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -965,45 +936,42 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float16x8x2_t texels =
{
{
- vld1q_f16(input_ptr + x),
- vld1q_f16(input_ptr + x + 8)
+ vld1q_f16(src_ptr + x),
+ vld1q_f16(src_ptr + x + 8)
}
};
- vst1q_s32(output_ptr + x, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale)));
- vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale)));
- vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale)));
- vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale)));
+ vst1q_s32(dst_ptr + x, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[0]))));
+ vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[0]))));
+ vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[1]))));
+ vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[1]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
case DataType::F32:
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
{
- const float scale_s = 1.f / (1 << _shift);
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Down-conversion F32 -> F16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1011,24 +979,24 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4x4_t texels =
{
{
- vmulq_f32(vld1q_f32(input_ptr + x), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 12), scale)
+ vld1q_f32(src_ptr + x),
+ vld1q_f32(src_ptr + x + 4),
+ vld1q_f32(src_ptr + x + 8),
+ vld1q_f32(src_ptr + x + 12)
}
};
- vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
- vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+ vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
+ vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
@@ -1038,37 +1006,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Down-conversion F32 -> BFLOAT16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<bfloat16 *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<bfloat16 *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(input.ptr()),
- reinterpret_cast<uint16_t *>(output.ptr()));
- wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(input.ptr()) + 8,
- reinterpret_cast<uint16_t *>(output.ptr()) + 8);
+ wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()),
+ reinterpret_cast<uint16_t *>(dst.ptr()));
+ wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()) + 8,
+ reinterpret_cast<uint16_t *>(dst.ptr()) + 8);
}
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = *(input_ptr + x);
+ *(dst_ptr + x) = *(src_ptr + x);
}
},
- input, output);
+ src, dst);
break;
}
#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
case DataType::S32:
{
- const float scale_s = 1.f / (1 << _shift);
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Conversion F32 -> S32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1076,39 +1041,36 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4x4_t texels =
{
{
- vmulq_f32(vld1q_f32(input_ptr + x), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
+ vld1q_f32(src_ptr + x),
+ vld1q_f32(src_ptr + x + 4),
+ vld1q_f32(src_ptr + x + 8),
+ vld1q_f32(src_ptr + x + 12),
}
};
- vst1q_s32(output_ptr + x, vcvtq_s32_f32(texels.val[0]));
- vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(texels.val[1]));
- vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(texels.val[2]));
- vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(texels.val[3]));
+ vst1q_s32(dst_ptr + x, vcvtq_s32_f32(texels.val[0]));
+ vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(texels.val[1]));
+ vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(texels.val[2]));
+ vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(texels.val[3]));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- const float scale_s = 1.f / (1 << _shift);
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Down-conversion F32 -> U8 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1116,36 +1078,33 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4x4_t texels =
{
{
- vmulq_f32(vld1q_f32(input_ptr + x), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
+ vld1q_f32(src_ptr + x),
+ vld1q_f32(src_ptr + x + 4),
+ vld1q_f32(src_ptr + x + 8),
+ vld1q_f32(src_ptr + x + 12),
}
};
- vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1])))));
- vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3])))));
+ vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1])))));
+ vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3])))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::QASYMM8_SIGNED:
{
- const float scale_s = 1.f / (1 << _shift);
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Down-conversion F32 -> QASYMM8_SIGNED */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1153,45 +1112,42 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4x4_t texels =
{
{
- vmulq_f32(vld1q_f32(input_ptr + x), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
- vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
+ vld1q_f32(src_ptr + x),
+ vld1q_f32(src_ptr + x + 4),
+ vld1q_f32(src_ptr + x + 8),
+ vld1q_f32(src_ptr + x + 12),
}
};
- vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1])))));
- vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3])))));
+ vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1])))));
+ vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3])))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
case DataType::S32:
- switch(_output->info()->data_type())
+ switch(_dst->info()->data_type())
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
{
- const float scale_s = 1.f / (1 << _shift);
- const float32x4_t scale = vdupq_n_f32(scale_s);
-
/* Down-conversion S32 -> F16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1199,37 +1155,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4x4_t texels =
{
{
- vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x)), scale),
- vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 4)), scale),
- vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 8)), scale),
- vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 12)), scale)
+ vcvtq_f32_s32(vld1q_s32(src_ptr + x)),
+ vcvtq_f32_s32(vld1q_s32(src_ptr + x + 4)),
+ vcvtq_f32_s32(vld1q_s32(src_ptr + x + 8)),
+ vcvtq_f32_s32(vld1q_s32(src_ptr + x + 12))
}
};
- vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
- vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+ vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
+ vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
case DataType::F32:
{
- const int scale_s = 1.f / (1 << _shift);
- const int32x4_t scale = vdupq_n_s32(scale_s);
-
/* Conversion S32 -> F32 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1237,39 +1190,37 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4x4_t texels =
{
{
- vmulq_s32(vld1q_s32(input_ptr + x), scale),
- vmulq_s32(vld1q_s32(input_ptr + x + 4), scale),
- vmulq_s32(vld1q_s32(input_ptr + x + 8), scale),
- vmulq_s32(vld1q_s32(input_ptr + x + 12), scale),
+ vld1q_s32(src_ptr + x),
+ vld1q_s32(src_ptr + x + 4),
+ vld1q_s32(src_ptr + x + 8),
+ vld1q_s32(src_ptr + x + 12),
}
};
- vst1q_f32(output_ptr + x, vcvtq_f32_s32(texels.val[0]));
- vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(texels.val[1]));
- vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(texels.val[2]));
- vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(texels.val[3]));
+ vst1q_f32(dst_ptr + x, vcvtq_f32_s32(texels.val[0]));
+ vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(texels.val[1]));
+ vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(texels.val[2]));
+ vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(texels.val[3]));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
+ *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
break;
}
case DataType::QASYMM8_SIGNED:
{
- const int32x4_t b = vdupq_n_s32(-static_cast<int32_t>(_shift));
-
/* Down-conversion S32 -> QASYMM8_SIGNED */
if(ConvertPolicy::SATURATE == _policy)
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1277,30 +1228,30 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4x4_t texels =
{
{
- vqshlq_s32(vld1q_s32(input_ptr + x), b),
- vqshlq_s32(vld1q_s32(input_ptr + x + 4), b),
- vqshlq_s32(vld1q_s32(input_ptr + x + 8), b),
- vqshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+ vld1q_s32(src_ptr + x),
+ vld1q_s32(src_ptr + x + 4),
+ vld1q_s32(src_ptr + x + 8),
+ vld1q_s32(src_ptr + x + 12),
}
};
- vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1]))));
- vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3]))));
+ vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1]))));
+ vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
else
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1308,39 +1259,37 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4x4_t texels =
{
{
- vshlq_s32(vld1q_s32(input_ptr + x), b),
- vshlq_s32(vld1q_s32(input_ptr + x + 4), b),
- vshlq_s32(vld1q_s32(input_ptr + x + 8), b),
- vshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+ vld1q_s32(src_ptr + x),
+ vld1q_s32(src_ptr + x + 4),
+ vld1q_s32(src_ptr + x + 8),
+ vld1q_s32(src_ptr + x + 12)
}
};
- vst1_s8(output_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1]))));
- vst1_s8(output_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3]))));
+ vst1_s8(dst_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1]))));
+ vst1_s8(dst_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = static_cast<int8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- const int32x4_t b = vdupq_n_s32(-static_cast<int32_t>(_shift));
-
/* Down-conversion S32 -> U8 */
if(ConvertPolicy::SATURATE == _policy)
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1348,30 +1297,30 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4x4_t texels =
{
{
- vqshlq_s32(vld1q_s32(input_ptr + x), b),
- vqshlq_s32(vld1q_s32(input_ptr + x + 4), b),
- vqshlq_s32(vld1q_s32(input_ptr + x + 8), b),
- vqshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+ vld1q_s32(src_ptr + x),
+ vld1q_s32(src_ptr + x + 4),
+ vld1q_s32(src_ptr + x + 8),
+ vld1q_s32(src_ptr + x + 12)
}
};
- vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1]))));
- vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3]))));
+ vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1]))));
+ vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3]))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
else
{
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -1379,32 +1328,40 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4x4_t texels =
{
{
- vshlq_s32(vld1q_s32(input_ptr + x), b),
- vshlq_s32(vld1q_s32(input_ptr + x + 4), b),
- vshlq_s32(vld1q_s32(input_ptr + x + 8), b),
- vshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+ vld1q_s32(src_ptr + x),
+ vld1q_s32(src_ptr + x + 4),
+ vld1q_s32(src_ptr + x + 8),
+ vld1q_s32(src_ptr + x + 12)
}
};
- vst1_u8(output_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1])))));
- vst1_u8(output_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3])))));
+ vst1_u8(dst_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1])))));
+ vst1_u8(dst_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3])))));
}
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+ *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
}
},
- input, output);
+ src, dst);
}
break;
}
default:
- ARM_COMPUTE_ERROR("Output data type not supported");
+ ARM_COMPUTE_ERROR("dst data type not supported");
}
break;
default:
ARM_COMPUTE_ERROR("Not supported");
}
}
+
+const char *CpuCastKernel::name() const
+{
+ return "CpuCastKernel.cpp";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
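
Note: every conversion path in the ported run body above follows the same vector-plus-tail pattern: a NEON loop handling window_step_x elements per iteration, then a scalar loop for the leftover elements. A minimal standalone sketch of that pattern for the saturating S16 -> U8 case (plain intrinsics outside the library; the function and buffer names are illustrative only, not part of the patch):

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>

// Saturating S16 -> U8 narrowing: 16 elements per NEON iteration, scalar tail for the rest.
void cast_s16_to_u8_saturate(const int16_t *src, uint8_t *dst, size_t n)
{
    size_t x = 0;
    for(; x + 16 <= n; x += 16)
    {
        const int16x8_t lo = vld1q_s16(src + x);
        const int16x8_t hi = vld1q_s16(src + x + 8);
        // vqmovun_s16 narrows with unsigned saturation: negatives clamp to 0, values > 255 clamp to 255.
        vst1q_u8(dst + x, vcombine_u8(vqmovun_s16(lo), vqmovun_s16(hi)));
    }
    for(; x < n; ++x)
    {
        const int16_t v = src[x];
        dst[x] = static_cast<uint8_t>(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
}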
diff --git a/src/core/cpu/kernels/CpuCastKernel.h b/src/core/cpu/kernels/CpuCastKernel.h
new file mode 100644
index 0000000000..2a75c5850e
--- /dev/null
+++ b/src/core/cpu/kernels/CpuCastKernel.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_CAST_KERNEL_H
+#define ARM_COMPUTE_CPU_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zeroPoint are ignored
+ */
+class CpuCastKernel : public ICpuKernel
+{
+public:
+ CpuCastKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuCastKernel);
+ /** Set the src and dst of the kernel
+ *
+ * Valid conversions src -> dst :
+ *
+ * - QASYMM8_SIGNED -> S16, S32, F32, F16
+ * - QASYMM8 -> U16, S16, S32, F32, F16
+ * - U8 -> U16, S16, S32, F32, F16
+ * - U16 -> U8, U32
+ * - S16 -> QASYMM8_SIGNED, U8, S32
+ * - BFLOAT16 -> F32
+ * - F16 -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8
+ * - S32 -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8
+ * - F32 -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
+ *
+ * @param[in] src The source tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
+ * @param[out] dst The destination tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
+ * @param[in] policy Conversion policy.
+ */
+ void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to @ref CpuCastKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+private:
+ ConvertPolicy _policy{ ConvertPolicy::SATURATE };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_CAST_KERNEL_H */
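
For orientation, a rough sketch of how the stateless interface declared above is exercised. The shapes and the calling function are invented for illustration; in the library the enclosing CpuCast operator and the scheduler do this plumbing:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/core/cpu/kernels/CpuCastKernel.h"

// Hypothetical example (shapes invented): validate and configure a U8 -> F32 cast on
// tensor metadata only; the actual run is driven later by the scheduler.
void configure_u8_to_f32_cast(arm_compute::cpu::kernels::CpuCastKernel &kernel)
{
    using namespace arm_compute;
    TensorInfo src_info(TensorShape(32U, 16U), 1, DataType::U8);
    TensorInfo dst_info(TensorShape(32U, 16U), 1, DataType::F32);

    ARM_COMPUTE_ERROR_THROW_ON(cpu::kernels::CpuCastKernel::validate(&src_info, &dst_info, ConvertPolicy::SATURATE));
    kernel.configure(&src_info, &dst_info, ConvertPolicy::SATURATE);
    // run_op(tensors, window, info) is then invoked by the scheduler with an
    // ITensorPack providing the ACL_SRC and ACL_DST backing tensors.
}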
diff --git a/src/core/gpu/cl/ClKernelLibrary.cpp b/src/core/gpu/cl/ClKernelLibrary.cpp
index 286ed4c021..b0458d7c3a 100644
--- a/src/core/gpu/cl/ClKernelLibrary.cpp
+++ b/src/core/gpu/cl/ClKernelLibrary.cpp
@@ -216,8 +216,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "concatenate_width_x2", "concatenate.cl" },
{ "concatenate_width_x4", "concatenate.cl" },
{ "col2im", "col2im.cl" },
- { "convert_depth_down", "depth_convert.cl" },
- { "convert_depth_up", "depth_convert.cl" },
+ { "cast_down", "cast.cl" },
+ { "cast_up", "cast.cl" },
{ "convert_fc_weights", "convert_fc_weights.cl" },
{ "copy_tensor", "copy_tensor.cl" },
{ "crop_tensor", "crop_tensor.cl" },
@@ -565,8 +565,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/deconvolution_layer.clembed"
},
{
- "depth_convert.cl",
-#include "./cl_kernels/depth_convert.clembed"
+ "cast.cl",
+#include "./cl_kernels/cast.clembed"
},
{
"depth_to_space.cl",
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/gpu/cl/kernels/ClCastKernel.cpp
index 0d5c7a4881..7a1d5c2824 100644
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClCastKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -33,93 +33,80 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-#include <cstddef>
-#include <set>
-#include <string>
+#include "support/Cast.h"
+#include "support/StringSupport.h"
namespace arm_compute
{
+namespace opencl
+{
+namespace kernels
+{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
{
ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input == output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input,
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
1,
DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
DataType::U16, DataType::U32, DataType::S32, DataType::F16,
DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst,
1,
DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
DataType::U16, DataType::U32, DataType::S32, DataType::F16,
DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == output->data_type(), "Input and output data types must be different");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
- ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == dst->data_type(), "src and dst data types must be different");
- // Validate in case of configured output
- if(output->total_size() > 0)
+ // Validate in case of configured dst
+ if(dst->total_size() > 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
}
return Status{};
}
} // namespace
-void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
-}
-
-void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
+void ClCastKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- _input = input;
- _output = output;
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
- set_shape_if_empty(*output->info(), input->info()->tensor_shape());
+ // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given)
+ set_shape_if_empty(*dst, src->tensor_shape());
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({ src, dst });
// Get data sizes
- const size_t input_size = data_size_from_type(input->info()->data_type());
- const size_t output_size = data_size_from_type(output->info()->data_type());
+ const size_t src_size = data_size_from_type(src->data_type());
+ const size_t dst_size = data_size_from_type(dst->data_type());
// Get number of elements to process per iterations
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0));
+ const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
// Set build options
CLBuildOptions build_opts;
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
+ build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(src->data_type()));
+ build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst->data_type()));
// Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined
- build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
- build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || is_data_type_float(output->info()->data_type()), "-DIS_DATA_TYPE_FLOAT");
- build_opts.add_option_if(is_data_type_quantized(input->info()->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
+ build_opts.add_option_if(is_data_type_float(src->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
+ build_opts.add_option_if(is_data_type_float(src->data_type()) || is_data_type_float(dst->data_type()), "-DIS_DATA_TYPE_FLOAT");
+ build_opts.add_option_if(is_data_type_quantized(src->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
// Create kernel
- const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up";
+ const std::string kernel_name = (src_size >= dst_size) ? "cast_down" : "cast_up";
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- // Set shift arg
- unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
- _kernel.setArg(idx++, shift);
-
// Configure kernel
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+ Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
// Collapse window
@@ -132,21 +119,45 @@ void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_contex
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
_config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+ _config_id += lower_string(string_from_data_type(src->data_type()));
_config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += support::cpp11::to_string(src->dimension(0));
_config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
+ _config_id += support::cpp11::to_string(src->dimension(1));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += support::cpp11::to_string(dst->dimension(0));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
+ _config_id += support::cpp11::to_string(dst->dimension(1));
}
-Status CLDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
+Status ClCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
-
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy));
return Status{};
}
+
+void ClCastKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ Window slice = collapsed.first_slice_window_3D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, src, slice);
+ add_3D_tensor_argument(idx, dst, slice);
+ enqueue(queue, *this, slice, lws_hint());
+ }
+ while(collapsed.slide_window_slice_3D(slice));
+}
+} // namespace kernels
+} // namespace opencl
} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.h b/src/core/gpu/cl/kernels/ClCastKernel.h
new file mode 100644
index 0000000000..451aa9c1ab
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClCastKernel.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_CAST_KERNEL_H
+#define ARM_COMPUTE_CL_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zeroPoint are ignored
+ */
+class ClCastKernel : public IClKernel
+{
+public:
+ ClCastKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCastKernel);
+ /** Set the src and dst of the kernel.
+ *
+ * Valid conversions src -> dst :
+ *
+ * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
+ * - U8 -> S8, U16, S16, U32, S32, F16, F32
+ * - U16 -> U8, S8, S16, U32, S32, F16, F32
+ * - S16 -> U8, S8, U16, U32, S32, F16, F32
+ * - U32 -> U8, S8, U16, S16, S32, F16, F32
+ * - S32 -> U8, S8, U16, S16, U32, F16, F32
+ * - F16 -> U8, S8, U16, S16, U32, F32
+ * - F32 -> U8, S8, U16, S16, U32, F16
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The source tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
+ * @param[out] dst The destination tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
+ * @param[in] policy Conversion policy
+ */
+ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to @ref ClCastKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_CAST_KERNEL_H */
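
The OpenCL kernel mirrors the CPU one but is configured with a compile context and enqueued slice by slice (see run_op in ClCastKernel.cpp above). A hedged sketch of direct configuration, with shapes invented for illustration; normally the opencl::ClCast operator owns this step:

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/core/gpu/cl/kernels/ClCastKernel.h"

// Hypothetical example (shapes invented): configure a U8 -> F16 cast for the GPU backend.
void configure_u8_to_f16_cast(arm_compute::opencl::kernels::ClCastKernel &kernel)
{
    using namespace arm_compute;
    TensorInfo src_info(TensorShape(64U, 8U), 1, DataType::U8);
    TensorInfo dst_info(TensorShape(64U, 8U), 1, DataType::F16);

    ARM_COMPUTE_ERROR_THROW_ON(opencl::kernels::ClCastKernel::validate(&src_info, &dst_info, ConvertPolicy::SATURATE));
    kernel.configure(CLKernelLibrary::get().get_compile_context(), &src_info, &dst_info, ConvertPolicy::SATURATE);
    // Since a U8 element is smaller than an F16 element, configure() selects the "cast_up"
    // OpenCL kernel; run_op() later enqueues it per 3D slice with an ITensorPack and a cl::CommandQueue.
}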
diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index 312cda399f..8e3b4c8705 100644
--- a/src/graph/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -28,11 +28,7 @@
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CPP/CPPFunctions.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLIm2ColKernel.h"
-#include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
+
#include "support/Cast.h"
using namespace arm_compute::utils::cast;
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index c030a64678..a485e5d235 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -28,17 +28,6 @@
#include "arm_compute/runtime/CPP/CPPFunctions.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-#include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
-#include "src/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "support/Cast.h"
using namespace arm_compute::utils::cast;
diff --git a/src/runtime/CL/functions/CLCast.cpp b/src/runtime/CL/functions/CLCast.cpp
index 202140d8b9..53256ebed4 100644
--- a/src/runtime/CL/functions/CLCast.cpp
+++ b/src/runtime/CL/functions/CLCast.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,12 +23,31 @@
*/
#include "arm_compute/runtime/CL/functions/CLCast.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClCast.h"
#include <utility>
namespace arm_compute
{
+struct CLCast::Impl
+{
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClCast> op{ nullptr };
+};
+
+CLCast::CLCast()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLCast::CLCast(CLCast &&) = default;
+CLCast &CLCast::operator=(CLCast &&) = default;
+CLCast::~CLCast() = default;
+
void CLCast::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, policy);
@@ -36,13 +55,23 @@ void CLCast::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy
void CLCast::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy)
{
- auto k = std::make_unique<CLDepthConvertLayerKernel>();
- k->configure(compile_context, input, output, policy, 0);
- _kernel = std::move(k);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ _impl->src = input;
+ _impl->dst = output;
+
+ _impl->op = std::make_unique<opencl::ClCast>();
+ _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), policy);
}
Status CLCast::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy)
{
- return CLDepthConvertLayerKernel::validate(input, output, policy, 0);
+ return opencl::ClCast::validate(input, output, policy);
+}
+
+void CLCast::run()
+{
+ ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+ _impl->op->run(pack);
}
} // namespace arm_compute
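
At the runtime-function level the operator and kernel stay hidden behind the Impl struct above, so callers only deal with CL tensors. A minimal usage sketch, assuming the CL scheduler has not been initialised yet; the function name, shape and the U8 -> F32 pair are illustrative:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLCast.h"

    using namespace arm_compute;

    void run_cl_cast_example()
    {
        CLScheduler::get().default_init(); // set up CL context and queue

        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::U8));
        dst.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));

        CLCast cast;
        cast.configure(&src, &dst, ConvertPolicy::SATURATE); // src and dst types must differ

        src.allocator()->allocate();
        dst.allocator()->allocate();

        cast.run(); // builds the ITensorPack and dispatches opencl::ClCast, as above
    }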
diff --git a/src/runtime/CL/functions/CLDepthConvertLayer.cpp b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
index 47bc52364d..6aa370b23c 100644
--- a/src/runtime/CL/functions/CLDepthConvertLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,12 +23,31 @@
*/
#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClCast.h"
#include <utility>
namespace arm_compute
{
+struct CLDepthConvertLayer::Impl
+{
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClCast> op{ nullptr };
+};
+
+CLDepthConvertLayer::CLDepthConvertLayer()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLDepthConvertLayer::CLDepthConvertLayer(CLDepthConvertLayer &&) = default;
+CLDepthConvertLayer &CLDepthConvertLayer::operator=(CLDepthConvertLayer &&) = default;
+CLDepthConvertLayer::~CLDepthConvertLayer() = default;
+
void CLDepthConvertLayer::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
@@ -36,13 +55,27 @@ void CLDepthConvertLayer::configure(const ICLTensor *input, ICLTensor *output, C
void CLDepthConvertLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
- auto k = std::make_unique<CLDepthConvertLayerKernel>();
- k->configure(compile_context, input, output, policy, shift);
- _kernel = std::move(k);
+ ARM_COMPUTE_UNUSED(shift);
+
+ _impl->src = input;
+ _impl->dst = output;
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
+ ARM_COMPUTE_ERROR_ON(shift != 0);
+
+ _impl->op = std::make_unique<opencl::ClCast>();
+ _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), policy);
}
Status CLDepthConvertLayer::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
{
- return CLDepthConvertLayerKernel::validate(input, output, policy, shift);
+ ARM_COMPUTE_RETURN_ERROR_ON(shift != 0);
+ return opencl::ClCast::validate(input, output, policy);
+}
+
+void CLDepthConvertLayer::run()
+{
+ ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+ _impl->op->run(pack);
}
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 991472bb7a..50a145f9ca 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 5dc7556b2f..3184d5dfe0 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLCol2ImKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
index 7a01018f59..d5d1b5f41e 100644
--- a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 099a2c980f..3be09581bd 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -34,12 +34,12 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/CL/gemm_auto_heuristics/CLGEMMAutoHeuristics.h"
@@ -189,7 +189,7 @@ inline bool is_gemm_reshaped(CLGEMMKernelType kernel_type)
CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _weights_to_qasymm8(std::make_unique<CLDepthConvertLayerKernel>()),
+ _weights_to_qasymm8(std::make_unique<opencl::kernels::ClCastKernel>()),
_mm_native_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyNativeKernel>()),
_mm_reshaped_only_rhs_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>()),
_mtx_b_reshape_kernel(std::make_unique<opencl::kernels::ClGemmReshapeRhsMatrixKernel>()),
@@ -272,7 +272,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const CLCompileContext &compile_con
TensorInfo weights_info(*b->info());
weights_info.set_data_type(DataType::QASYMM8);
_qasymm8_weights.allocator()->init(weights_info);
- _weights_to_qasymm8->configure(compile_context, b, &_qasymm8_weights, ConvertPolicy::WRAP, 0);
+ _weights_to_qasymm8->configure(compile_context, b->info(), _qasymm8_weights.info(), ConvertPolicy::WRAP);
}
const ICLTensor *matrix_b = _convert_to_qasymm8 ? &_qasymm8_weights : b;
@@ -480,7 +480,7 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
{
b_offset = -128;
weights_info.set_data_type(DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConvertLayerKernel::validate(b, &weights_info, ConvertPolicy::WRAP, 0));
+ ARM_COMPUTE_RETURN_ON_ERROR(opencl::kernels::ClCastKernel::validate(b, &weights_info, ConvertPolicy::WRAP));
}
const ITensorInfo *matrix_b_info = &weights_info;
if(reshape_matrix_b)
@@ -681,7 +681,8 @@ void CLGEMMLowpMatrixMultiplyCore::prepare()
if(_convert_to_qasymm8)
{
_qasymm8_weights.allocator()->allocate();
- CLScheduler::get().enqueue(*_weights_to_qasymm8, false);
+ ITensorPack convert_to_qs8_pack = { { ACL_SRC, _original_b }, { ACL_DST, &_qasymm8_weights } };
+ CLScheduler::get().enqueue_op(*_weights_to_qasymm8, convert_to_qs8_pack, false);
}
if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
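
The prepare() change above swaps the stateful kernel enqueue for the operator-style path: tensors are no longer captured at configure time but handed over at execution time through an ITensorPack keyed by ACL_SRC/ACL_DST. A small sketch of that pattern in isolation; the helper and its parameter names are illustrative, mirroring the members used above:

    #include "arm_compute/core/CL/ICLTensor.h"
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "src/core/gpu/cl/kernels/ClCastKernel.h"

    using namespace arm_compute;

    void enqueue_cast(opencl::kernels::ClCastKernel &weights_to_qasymm8,
                      const ICLTensor *original_b, ICLTensor *qasymm8_weights)
    {
        // Tensors are bound at execution time, not at configuration time.
        ITensorPack pack = { { ACL_SRC, original_b }, { ACL_DST, qasymm8_weights } };
        CLScheduler::get().enqueue_op(weights_to_qasymm8, pack, false); // false: do not flush the queue
    }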
diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp
index 146ac8f619..85d13c246e 100644
--- a/src/runtime/CL/functions/CLLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayer.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp b/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
index 69974424c9..a44dcd2e24 100644
--- a/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLQLSTMLayer.cpp b/src/runtime/CL/functions/CLQLSTMLayer.cpp
index 7b6ec8f5c8..fcf5b9d2a4 100644
--- a/src/runtime/CL/functions/CLQLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLQLSTMLayer.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/core/utils/misc/InfoHelpers.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLRNNLayer.cpp b/src/runtime/CL/functions/CLRNNLayer.cpp
index 45ced35782..755fa40121 100644
--- a/src/runtime/CL/functions/CLRNNLayer.cpp
+++ b/src/runtime/CL/functions/CLRNNLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/NEON/functions/NECast.cpp b/src/runtime/NEON/functions/NECast.cpp
index a42f512ce6..b519576ad5 100644
--- a/src/runtime/NEON/functions/NECast.cpp
+++ b/src/runtime/NEON/functions/NECast.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,23 +23,45 @@
*/
#include "arm_compute/runtime/NEON/functions/NECast.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
-
-#include <utility>
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuCast.h"
namespace arm_compute
{
+struct NECast::Impl
+{
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuCast> op{ nullptr };
+};
+
+NECast::NECast()
+ : _impl(std::make_unique<Impl>())
+{
+}
+NECast::NECast(NECast &&) = default;
+NECast &NECast::operator=(NECast &&) = default;
+NECast::~NECast() = default;
+
void NECast::configure(ITensor *input, ITensor *output, ConvertPolicy policy)
{
- auto k = std::make_unique<NEDepthConvertLayerKernel>();
- k->configure(input, output, policy, 0);
- _kernel = std::move(k);
+ _impl->src = input;
+ _impl->dst = output;
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
+
+ _impl->op = std::make_unique<cpu::CpuCast>();
+ _impl->op->configure(_impl->src->info(), _impl->dst->info(), policy);
}
Status NECast::validate(ITensorInfo *input, ITensorInfo *output, ConvertPolicy policy)
{
- return NEDepthConvertLayerKernel::validate(input, output, policy, 0);
+ return cpu::CpuCast::validate(input, output, policy);
+}
+
+void NECast::run()
+{
+ ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+ _impl->op->run(pack);
}
} // namespace arm_compute
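
The CPU-side function mirrors the CL one: the public NECast builds the pack and forwards to cpu::CpuCast. A minimal usage sketch with an illustrative U8 -> S16 conversion; the function name and shape are made up for the example:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NECast.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void run_ne_cast_example()
    {
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::U8));
        dst.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::S16));

        NECast cast;
        cast.configure(&src, &dst, ConvertPolicy::SATURATE);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        cast.run(); // wraps the tensors in an ITensorPack and runs cpu::CpuCast, as above
    }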
diff --git a/src/runtime/NEON/functions/NEDepthConvertLayer.cpp b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
index 761de8eb60..07e985c25e 100644
--- a/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,20 +23,51 @@
*/
#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuCast.h"
#include <utility>
-using namespace arm_compute;
+namespace arm_compute
+{
+struct NEDepthConvertLayer::Impl
+{
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuCast> op{ nullptr };
+};
+
+NEDepthConvertLayer::NEDepthConvertLayer()
+ : _impl(std::make_unique<Impl>())
+{
+}
+NEDepthConvertLayer::NEDepthConvertLayer(NEDepthConvertLayer &&) = default;
+NEDepthConvertLayer &NEDepthConvertLayer::operator=(NEDepthConvertLayer &&) = default;
+NEDepthConvertLayer::~NEDepthConvertLayer() = default;
void NEDepthConvertLayer::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
{
- auto k = std::make_unique<NEDepthConvertLayerKernel>();
- k->configure(input, output, policy, shift);
- _kernel = std::move(k);
+ ARM_COMPUTE_UNUSED(shift);
+
+ _impl->src = input;
+ _impl->dst = output;
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
+ ARM_COMPUTE_ERROR_ON(shift != 0);
+
+ _impl->op = std::make_unique<cpu::CpuCast>();
+ _impl->op->configure(_impl->src->info(), _impl->dst->info(), policy);
}
Status NEDepthConvertLayer::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
{
- return NEDepthConvertLayerKernel::validate(input, output, policy, shift);
+ ARM_COMPUTE_RETURN_ERROR_ON(shift != 0);
+ return cpu::CpuCast::validate(input, output, policy);
+}
+
+void NEDepthConvertLayer::run()
+{
+ ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+ _impl->op->run(pack);
}
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuCast.cpp b/src/runtime/cpu/operators/CpuCast.cpp
new file mode 100644
index 0000000000..5a4f6c518e
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuCast.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuCast.h"
+
+#include "src/core/cpu/kernels/CpuCastKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuCast::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+ auto k = std::make_unique<kernels::CpuCastKernel>();
+ k->configure(src, dst, policy);
+ _kernel = std::move(k);
+}
+
+Status CpuCast::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+ return kernels::CpuCastKernel::validate(src, dst, policy);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuCast.h b/src/runtime/cpu/operators/CpuCast.h
new file mode 100644
index 0000000000..2aea2d2b09
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuCast.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_CAST_H
+#define ARM_COMPUTE_CPU_CAST_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref kernels::CpuCastKernel */
+class CpuCast : public ICpuOperator
+{
+public:
+ /** Constructor */
+ CpuCast() = default;
+ /** Configure operator for a given list of arguments
+ *
+     * Input data type must be different from the output data type.
+ *
+ * Valid data layouts:
+ * - All
+ *
+ * Valid data type configurations:
+ * |src |dst |
+ * |:--------------|:-----------------------------------------------|
+ * |QASYMM8_SIGNED | S16, S32, F32, F16 |
+ * |QASYMM8 | U16, S16, S32, F32, F16 |
+ * |U8 | U16, S16, S32, F32, F16 |
+ * |U16 | U8, U32 |
+ * |S16 | QASYMM8_SIGNED, U8, S32 |
+ * |F16 | QASYMM8_SIGNED, QASYMM8, F32, S32, U8 |
+ * |S32 | QASYMM8_SIGNED, QASYMM8, F16, F32, U8 |
+ * |F32 | QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8|
+ *
+     * @param[in]  src    The source tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/F16/S32/F32.
+     * @param[out] dst    The destination tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
+ * @param[in] policy Conversion policy.
+ */
+ void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to @ref CpuCast::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_CAST_H */
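
Because the operator is configured from ITensorInfo and receives the actual tensors only at run time, one configured CpuCast can be reused for any tensors matching those infos. A minimal internal-API sketch, assuming the src/ headers are on the include path and that the caller passes already-allocated U8 and F32 tensors; the helper name is illustrative:

    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/ITensorPack.h"
    #include "src/runtime/cpu/operators/CpuCast.h"

    using namespace arm_compute;

    void run_cpu_cast_example(ITensor *src, ITensor *dst)
    {
        // src/dst are assumed to be allocated as U8 and F32 respectively.
        cpu::CpuCast op;
        op.configure(src->info(), dst->info(), ConvertPolicy::SATURATE);

        ITensorPack pack = { { ACL_SRC, src }, { ACL_DST, dst } };
        op.run(pack); // the same pack (or another with matching infos) can be run repeatedly
    }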
diff --git a/src/runtime/gpu/cl/operators/ClCast.cpp b/src/runtime/gpu/cl/operators/ClCast.cpp
new file mode 100644
index 0000000000..3f54004aa7
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClCast.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClCast.h"
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+void ClCast::configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+ auto k = std::make_unique<kernels::ClCastKernel>();
+ k->configure(compile_context, src, dst, policy);
+ _kernel = std::move(k);
+}
+
+Status ClCast::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+ return kernels::ClCastKernel::validate(src, dst, policy);
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClCast.h b/src/runtime/gpu/cl/operators/ClCast.h
new file mode 100644
index 0000000000..69e028debd
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClCast.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_CAST_H
+#define ARM_COMPUTE_CL_CAST_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to run @ref kernels::ClCastKernel */
+class ClCast : public IClOperator
+{
+public:
+ /** Constructor */
+ ClCast() = default;
+ /** Configure operator for a given list of arguments
+ *
+     * @note Input data type must be different from the output data type.
+ *
+ * Valid data layouts:
+ * - All
+ *
+ * Valid data type configurations:
+ * |src |dst |
+ * |:--------------|:--------------------------------------|
+ * |U8 | S8, U16, S16, U32, S32, F16, F32 |
+ * |U16 | U8, S8, S16, U32, S32, F16, F32 |
+ * |S16 | U8, S8, U16, U32, S32, F16, F32 |
+ * |U32 | U8, S8, U16, S16, S32, F16, F32 |
+ * |S32 | U8, S8, U16, S16, U32, F16, F32 |
+ * |F16 | U8, S8, U16, S16, U32, F32 |
+ * |F32 | U8, S8, U16, S16, U32, F16 |
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The source tensor to convert. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+     * @param[out] dst             The destination tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] policy Conversion policy.
+ */
+ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to @ref ClCast::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_CAST_H */
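
validate() lets a caller reject an unsupported pair (for instance identical src and dst types) before any kernel is built. A small sketch under that assumption, using an illustrative U8 -> F16 query; the function name is made up for the example:

    #include "arm_compute/core/TensorInfo.h"
    #include "src/runtime/gpu/cl/operators/ClCast.h"

    using namespace arm_compute;

    bool can_cast_u8_to_f16(const TensorShape &shape)
    {
        TensorInfo src(shape, 1, DataType::U8);
        TensorInfo dst(shape, 1, DataType::F16);
        // An error status is returned for unsupported src/dst combinations.
        const Status status = opencl::ClCast::validate(&src, &dst, ConvertPolicy::SATURATE);
        return status.error_code() == ErrorCode::OK;
    }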
diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp
index a823b278fc..8f14337b27 100644
--- a/tests/validation/CL/DepthConvertLayer.cpp
+++ b/tests/validation/CL/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,17 +44,16 @@ namespace validation
namespace
{
/** Input data sets **/
-const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
-const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
-const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
-const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerF16toF32Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerF32toF16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F16));
-const auto DepthConvertLayerShiftDatasetNightly = framework::dataset::make("Shift", 0, 7);
-const auto DepthConvertLayerShiftDatasetPrecommit = framework::dataset::make("Shift", { 0, 3, 6 });
+const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
+const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
+const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
+const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerF16toF32Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerF32toF16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F16));
+const auto DepthConvertLayerZeroShiftDataset = framework::dataset::make("Shift", 0);
} // namespace
TEST_SUITE(CL)
@@ -84,7 +83,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
ConvertPolicy::WRAP,
ConvertPolicy::WRAP,
})),
- framework::dataset::make("Shift",{ 1, 1, 8, 1, 1, 1, })),
+ framework::dataset::make("Shift",{ 0, 0, 0, 1, 1, 0, })),
framework::dataset::make("Expected", { false, false, false, false, false, true})),
input_info, output_info, policy, shift, expected)
{
@@ -111,7 +110,7 @@ using CLDepthConvertLayerToF32Fixture = DepthConvertLayerValidationFixture<CLTen
TEST_SUITE(U8_to_U16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toU16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -119,7 +118,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU16Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toU16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -129,7 +128,7 @@ TEST_SUITE_END() // U8_to_U16
TEST_SUITE(U8_to_S16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -137,7 +136,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS16Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -146,7 +145,7 @@ TEST_SUITE_END() // U8_to_S16
TEST_SUITE(U8_to_S32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -154,7 +153,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS32Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -164,14 +163,14 @@ TEST_SUITE_END() // U8_to_S32
TEST_SUITE(U16_to_U8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -181,14 +180,14 @@ TEST_SUITE_END() // U16_to_U8
TEST_SUITE(U16_to_U32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -198,14 +197,14 @@ TEST_SUITE_END() // U16_to_U32
TEST_SUITE(S16_to_U8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -215,14 +214,14 @@ TEST_SUITE_END() // S16_to_U8
TEST_SUITE(S16_to_S32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
diff --git a/tests/validation/CL/UNIT/WeightsRetention.cpp b/tests/validation/CL/UNIT/WeightsRetention.cpp
index 1965f0f7a5..357c88af10 100644
--- a/tests/validation/CL/UNIT/WeightsRetention.cpp
+++ b/tests/validation/CL/UNIT/WeightsRetention.cpp
@@ -22,7 +22,6 @@
* SOFTWARE.
*/
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index 60631181bf..5649e5a556 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -70,11 +70,9 @@ const auto DepthConvertLayerS32toQASYMM8Dataset = combine(framework::dataset::ma
const auto DepthConvertLayerS32toF16Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F16));
const auto DepthConvertLayerS32toU8Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerF16toQASYMM8Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::QASYMM8));
-const auto DepthConvertLayerF32toQASYMM8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QASYMM8));
-const auto DepthConvertLayerShiftDatasetNightly = framework::dataset::make("Shift", 0, 7);
-const auto DepthConvertLayerShiftDatasetPrecommit = framework::dataset::make("Shift", { 0, 3, 6 });
-const auto DepthConvertLayerZeroShiftDataset = framework::dataset::make("Shift", 0);
+const auto DepthConvertLayerF16toQASYMM8Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::QASYMM8));
+const auto DepthConvertLayerF32toQASYMM8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QASYMM8));
+const auto DepthConvertLayerZeroShiftDataset = framework::dataset::make("Shift", 0);
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
constexpr AbsoluteTolerance<int32_t> tolerance_one_int32(1);
@@ -108,7 +106,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
ConvertPolicy::WRAP,
ConvertPolicy::WRAP,
})),
- framework::dataset::make("Shift",{ 1, 1, 1, 1, 1, 1, 8, 1,
+ framework::dataset::make("Shift",{ 0, 0, 0, 1, 1, 1, 8, 1,
})),
framework::dataset::make("Expected", { false, false, false, false, true})),
input_info, output_info, policy, shift, expected)
@@ -188,7 +186,7 @@ TEST_SUITE_END() // QASYMM8_to_S32
TEST_SUITE(U8_to_U16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toU16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -196,7 +194,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU16Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toU16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -206,7 +204,7 @@ TEST_SUITE_END() // U8_to_U16
TEST_SUITE(U8_to_S16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -214,7 +212,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS16Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -223,7 +221,7 @@ TEST_SUITE_END() // U8_to_S16
TEST_SUITE(U8_to_S32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -231,7 +229,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -241,7 +239,7 @@ TEST_SUITE_END() // U8_to_S32
TEST_SUITE(U8_to_F32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toF32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -249,7 +247,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF32Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToF32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toF32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -260,7 +258,7 @@ TEST_SUITE_END() // U8_to_F32
TEST_SUITE(U8_to_F16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toF16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -268,7 +266,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF16Fixture<uint8_t>, frame
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToF16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toF16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -260,7 +258,7 @@ TEST_SUITE_END() // U8_to_F16
TEST_SUITE(U16_to_U8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -296,14 +294,14 @@ TEST_SUITE_END() // U16_to_U8
TEST_SUITE(U16_to_U32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -313,14 +311,14 @@ TEST_SUITE_END() // U16_to_U32
TEST_SUITE(S16_to_U8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -330,14 +328,14 @@ TEST_SUITE_END() // S16_to_U8
TEST_SUITE(S16_to_S32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetPrecommit))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toS32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerShiftDatasetNightly))
+ DepthConvertLayerZeroShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);