aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2021-04-30 14:46:05 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2021-05-05 17:39:26 +0000
commitef516e8bb8eb7f55b410268587f3b88b77e2fd8e (patch)
treed2043bf6bae9c51ab0344a4e13f1c54205e28c3c
parent448cb45e2cb86f32a739c925a1ac8c688cf573bf (diff)
downloadComputeLibrary-ef516e8bb8eb7f55b410268587f3b88b77e2fd8e.tar.gz
Rename Quantization/Dequantization kernels/operators to imperative mood
Renames the following kernels/functions - [Cl|Cpu]DequantizationKernel -> [Cl|Cpu]DequantizeKernel - [Cl|Cpu]Dequantization -> [Cl|Cpu]Dequantize - [Cl|Cpu]QuantizationKernel -> [Cl|Cpu]QuantizeKernel - [Cl|Cpu]Quantization -> [Cl|Cpu]Quantize Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com> Change-Id: Ic3c5eb3b7fe28f807294d159830eef99c2dd6219 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5566 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--Android.bp16
-rw-r--r--SConscript8
-rw-r--r--arm_compute/runtime/CL/functions/CLDequantizationLayer.h2
-rw-r--r--arm_compute/runtime/CL/functions/CLQuantizationLayer.h2
-rw-r--r--arm_compute/runtime/NEON/functions/NEDequantizationLayer.h2
-rw-r--r--arm_compute/runtime/NEON/functions/NEQuantizationLayer.h2
-rw-r--r--src/core/cpu/kernels/CpuDequantizeKernel.cpp (renamed from src/core/cpu/kernels/CpuDequantizationKernel.cpp)12
-rw-r--r--src/core/cpu/kernels/CpuDequantizeKernel.h (renamed from src/core/cpu/kernels/CpuDequantizationKernel.h)17
-rw-r--r--src/core/cpu/kernels/CpuQuantizeKernel.cpp (renamed from src/core/cpu/kernels/CpuQuantizationKernel.cpp)47
-rw-r--r--src/core/cpu/kernels/CpuQuantizeKernel.h (renamed from src/core/cpu/kernels/CpuQuantizationKernel.h)26
-rw-r--r--src/core/gpu/cl/kernels/ClDequantizeKernel.cpp (renamed from src/core/gpu/cl/kernels/ClDequantizationKernel.cpp)14
-rw-r--r--src/core/gpu/cl/kernels/ClDequantizeKernel.h (renamed from src/core/gpu/cl/kernels/ClDequantizationKernel.h)20
-rw-r--r--src/core/gpu/cl/kernels/ClQuantizeKernel.cpp (renamed from src/core/gpu/cl/kernels/ClQuantizationKernel.cpp)14
-rw-r--r--src/core/gpu/cl/kernels/ClQuantizeKernel.h (renamed from src/core/gpu/cl/kernels/ClQuantizationKernel.h)21
-rw-r--r--src/runtime/CL/functions/CLDequantizationLayer.cpp12
-rw-r--r--src/runtime/CL/functions/CLQuantizationLayer.cpp12
-rw-r--r--src/runtime/NEON/functions/NEDequantizationLayer.cpp12
-rw-r--r--src/runtime/NEON/functions/NEQuantizationLayer.cpp12
-rw-r--r--src/runtime/cpu/operators/CpuDequantize.cpp (renamed from src/runtime/cpu/operators/CpuDequantization.cpp)14
-rw-r--r--src/runtime/cpu/operators/CpuDequantize.h (renamed from src/runtime/cpu/operators/CpuDequantization.h)22
-rw-r--r--src/runtime/cpu/operators/CpuQuantize.cpp (renamed from src/runtime/cpu/operators/CpuQuantization.cpp)14
-rw-r--r--src/runtime/cpu/operators/CpuQuantize.h (renamed from src/runtime/cpu/operators/CpuQuantization.h)29
-rw-r--r--src/runtime/gpu/cl/operators/ClDequantize.cpp (renamed from src/runtime/gpu/cl/operators/ClQuantization.cpp)14
-rw-r--r--src/runtime/gpu/cl/operators/ClDequantize.h (renamed from src/runtime/gpu/cl/operators/ClDequantization.h)18
-rw-r--r--src/runtime/gpu/cl/operators/ClQuantize.cpp (renamed from src/runtime/gpu/cl/operators/ClDequantization.cpp)15
-rw-r--r--src/runtime/gpu/cl/operators/ClQuantize.h (renamed from src/runtime/gpu/cl/operators/ClQuantization.h)23
26 files changed, 178 insertions, 222 deletions
diff --git a/Android.bp b/Android.bp
index 046b1c08a5..1354b365e4 100644
--- a/Android.bp
+++ b/Android.bp
@@ -299,7 +299,7 @@ cc_library_static {
"src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.cpp",
"src/core/cpu/kernels/CpuCopyKernel.cpp",
"src/core/cpu/kernels/CpuDepthwiseConvolutionNativeKernel.cpp",
- "src/core/cpu/kernels/CpuDequantizationKernel.cpp",
+ "src/core/cpu/kernels/CpuDequantizeKernel.cpp",
"src/core/cpu/kernels/CpuDirectConvolutionKernel.cpp",
"src/core/cpu/kernels/CpuDirectConvolutionOutputStageKernel.cpp",
"src/core/cpu/kernels/CpuElementwiseKernel.cpp",
@@ -310,7 +310,7 @@ cc_library_static {
"src/core/cpu/kernels/CpuPermuteKernel.cpp",
"src/core/cpu/kernels/CpuPoolingAssemblyWrapperKernel.cpp",
"src/core/cpu/kernels/CpuPoolingKernel.cpp",
- "src/core/cpu/kernels/CpuQuantizationKernel.cpp",
+ "src/core/cpu/kernels/CpuQuantizeKernel.cpp",
"src/core/cpu/kernels/CpuReshapeKernel.cpp",
"src/core/cpu/kernels/CpuScaleKernel.cpp",
"src/core/cpu/kernels/CpuSoftmaxKernel.cpp",
@@ -360,7 +360,7 @@ cc_library_static {
"src/core/gpu/cl/kernels/ClCopyKernel.cpp",
"src/core/gpu/cl/kernels/ClCropKernel.cpp",
"src/core/gpu/cl/kernels/ClDepthConcatenateKernel.cpp",
- "src/core/gpu/cl/kernels/ClDequantizationKernel.cpp",
+ "src/core/gpu/cl/kernels/ClDequantizeKernel.cpp",
"src/core/gpu/cl/kernels/ClDirectConvolutionKernel.cpp",
"src/core/gpu/cl/kernels/ClElementwiseKernel.cpp",
"src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp",
@@ -370,7 +370,7 @@ cc_library_static {
"src/core/gpu/cl/kernels/ClMulKernel.cpp",
"src/core/gpu/cl/kernels/ClPermuteKernel.cpp",
"src/core/gpu/cl/kernels/ClPoolingKernel.cpp",
- "src/core/gpu/cl/kernels/ClQuantizationKernel.cpp",
+ "src/core/gpu/cl/kernels/ClQuantizeKernel.cpp",
"src/core/gpu/cl/kernels/ClReshapeKernel.cpp",
"src/core/gpu/cl/kernels/ClScaleKernel.cpp",
"src/core/gpu/cl/kernels/ClSoftmaxKernel.cpp",
@@ -634,7 +634,7 @@ cc_library_static {
"src/runtime/cpu/operators/CpuCopy.cpp",
"src/runtime/cpu/operators/CpuDepthwiseConvolution.cpp",
"src/runtime/cpu/operators/CpuDepthwiseConvolutionAssemblyDispatch.cpp",
- "src/runtime/cpu/operators/CpuDequantization.cpp",
+ "src/runtime/cpu/operators/CpuDequantize.cpp",
"src/runtime/cpu/operators/CpuDirectConvolution.cpp",
"src/runtime/cpu/operators/CpuElementwise.cpp",
"src/runtime/cpu/operators/CpuElementwiseUnary.cpp",
@@ -644,7 +644,7 @@ cc_library_static {
"src/runtime/cpu/operators/CpuPermute.cpp",
"src/runtime/cpu/operators/CpuPooling.cpp",
"src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp",
- "src/runtime/cpu/operators/CpuQuantization.cpp",
+ "src/runtime/cpu/operators/CpuQuantize.cpp",
"src/runtime/cpu/operators/CpuReshape.cpp",
"src/runtime/cpu/operators/CpuScale.cpp",
"src/runtime/cpu/operators/CpuSoftmax.cpp",
@@ -656,7 +656,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.cpp",
"src/runtime/gpu/cl/operators/ClCopy.cpp",
"src/runtime/gpu/cl/operators/ClCrop.cpp",
- "src/runtime/gpu/cl/operators/ClDequantization.cpp",
+ "src/runtime/gpu/cl/operators/ClDequantize.cpp",
"src/runtime/gpu/cl/operators/ClDirectConvolution.cpp",
"src/runtime/gpu/cl/operators/ClElementwiseOperations.cpp",
"src/runtime/gpu/cl/operators/ClElementwiseUnary.cpp",
@@ -667,7 +667,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClPRelu.cpp",
"src/runtime/gpu/cl/operators/ClPermute.cpp",
"src/runtime/gpu/cl/operators/ClPooling.cpp",
- "src/runtime/gpu/cl/operators/ClQuantization.cpp",
+ "src/runtime/gpu/cl/operators/ClQuantize.cpp",
"src/runtime/gpu/cl/operators/ClReshape.cpp",
"src/runtime/gpu/cl/operators/ClScale.cpp",
"src/runtime/gpu/cl/operators/ClSoftmax.cpp",
diff --git a/SConscript b/SConscript
index da92409867..e94ff1ed6d 100644
--- a/SConscript
+++ b/SConscript
@@ -307,13 +307,13 @@ if env['neon']:
'src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp',
'src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.cpp',
'src/core/cpu/kernels/CpuCopyKernel.cpp',
- 'src/core/cpu/kernels/CpuDequantizationKernel.cpp',
+ 'src/core/cpu/kernels/CpuDequantizeKernel.cpp',
'src/core/cpu/kernels/CpuElementwiseKernel.cpp',
'src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp',
'src/core/cpu/kernels/CpuFillKernel.cpp',
'src/core/cpu/kernels/CpuFloorKernel.cpp',
'src/core/cpu/kernels/CpuMulKernel.cpp',
- 'src/core/cpu/kernels/CpuQuantizationKernel.cpp',
+ 'src/core/cpu/kernels/CpuQuantizeKernel.cpp',
'src/core/cpu/kernels/CpuScaleKernel.cpp',
'src/core/cpu/kernels/CpuSoftmaxKernel.cpp',
'src/core/cpu/kernels/CpuSubKernel.cpp',
@@ -354,13 +354,13 @@ if env['neon']:
'src/runtime/cpu/operators/CpuConcatenate.cpp',
'src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.cpp',
'src/runtime/cpu/operators/CpuCopy.cpp',
- 'src/runtime/cpu/operators/CpuDequantization.cpp',
+ 'src/runtime/cpu/operators/CpuDequantize.cpp',
'src/runtime/cpu/operators/CpuElementwise.cpp',
'src/runtime/cpu/operators/CpuElementwiseUnary.cpp',
'src/runtime/cpu/operators/CpuFill.cpp',
'src/runtime/cpu/operators/CpuFloor.cpp',
'src/runtime/cpu/operators/CpuMul.cpp',
- 'src/runtime/cpu/operators/CpuQuantization.cpp',
+ 'src/runtime/cpu/operators/CpuQuantize.cpp',
'src/runtime/cpu/operators/CpuReshape.cpp',
'src/runtime/cpu/operators/CpuScale.cpp',
'src/runtime/cpu/operators/CpuSoftmax.cpp',
diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index 601c13d0e4..b01fe9eb14 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -36,7 +36,7 @@ class CLCompileContext;
class ICLTensor;
class ITensorInfo;
-/** Basic function to run @ref opencl::ClDequantization that dequantizes an input tensor */
+/** Basic function to run @ref opencl::ClDequantize that dequantizes an input tensor */
class CLDequantizationLayer : public IFunction
{
public:
diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
index a61735cb97..6543496d93 100644
--- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
@@ -37,7 +37,7 @@ class ITensorInfo;
/** Basic function to simulate a quantization layer. This function calls the following CL kernels:
*
- * -# @ref opencl::ClQuantization
+ * -# @ref opencl::ClQuantize
*
* @note The implementation supports only 3D input tensors.
*
diff --git a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
index 91ed056cf3..8b49930ef5 100644
--- a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
@@ -35,7 +35,7 @@ namespace arm_compute
class ITensor;
class ITensorInfo;
-/** Basic function to run @ref cpu::CpuDequantization that dequantizes an input tensor */
+/** Basic function to run @ref cpu::CpuDequantize that dequantizes an input tensor */
class NEDequantizationLayer : public IFunction
{
public:
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index eeca2bb1db..7bf97e28a5 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -35,7 +35,7 @@ namespace arm_compute
class ITensor;
class ITensorInfo;
-/** Basic function to run a quantization layer using @ref cpu::CpuQuantization */
+/** Basic function to run a quantization layer using @ref cpu::CpuQuantize */
class NEQuantizationLayer : public IFunction
{
public:
diff --git a/src/core/cpu/kernels/CpuDequantizationKernel.cpp b/src/core/cpu/kernels/CpuDequantizeKernel.cpp
index 2aa9fb9068..42b5439697 100644
--- a/src/core/cpu/kernels/CpuDequantizationKernel.cpp
+++ b/src/core/cpu/kernels/CpuDequantizeKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/cpu/kernels/CpuDequantizationKernel.h"
+#include "src/core/cpu/kernels/CpuDequantizeKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -349,7 +349,7 @@ void run_dequantization_core(const ITensor *input, ITensor *output, const Window
}
} // namespace
-void CpuDequantizationKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+void CpuDequantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
@@ -362,13 +362,13 @@ void CpuDequantizationKernel::configure(const ITensorInfo *src, ITensorInfo *dst
ICpuKernel::configure(win);
}
-Status CpuDequantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
return Status{};
}
-void CpuDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+void CpuDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
@@ -391,9 +391,9 @@ void CpuDequantizationKernel::run_op(ITensorPack &tensors, const Window &window,
ARM_COMPUTE_ERROR("Unsupported data type.");
}
}
-const char *CpuDequantizationKernel::name() const
+const char *CpuDequantizeKernel::name() const
{
- return "CpuDequantizationKernel";
+ return "CpuDequantizeKernel";
}
} // namespace kernels
} // namespace cpu
diff --git a/src/core/cpu/kernels/CpuDequantizationKernel.h b/src/core/cpu/kernels/CpuDequantizeKernel.h
index 8ac807097c..798f32cec7 100644
--- a/src/core/cpu/kernels/CpuDequantizationKernel.h
+++ b/src/core/cpu/kernels/CpuDequantizeKernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H
-#define ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H
+#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H
#include "src/core/common/Macros.h"
#include "src/core/cpu/ICpuKernel.h"
@@ -34,22 +34,21 @@ namespace cpu
namespace kernels
{
/** Interface for the dequantization layer kernel. */
-class CpuDequantizationKernel : public ICpuKernel
+class CpuDequantizeKernel : public ICpuKernel
{
public:
/** Default constructor */
- CpuDequantizationKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizationKernel);
+ CpuDequantizeKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizeKernel);
/** Set input, output tensors.
*
* @param[in] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantizationKernel
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] dst Destination tensor info. Data types supported: F16/F32.
+ * Similar to @ref CpuDequantizeKernel::configure()
*
* @return a status
*/
@@ -62,4 +61,4 @@ public:
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
-#endif /*ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H */
diff --git a/src/core/cpu/kernels/CpuQuantizationKernel.cpp b/src/core/cpu/kernels/CpuQuantizeKernel.cpp
index 9b1e017275..8ca81e8b11 100644
--- a/src/core/cpu/kernels/CpuQuantizationKernel.cpp
+++ b/src/core/cpu/kernels/CpuQuantizeKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/cpu/kernels/CpuQuantizationKernel.h"
+#include "src/core/cpu/kernels/CpuQuantizeKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -108,34 +108,29 @@ vector_type<int8_t> vquantize_qasymm8<int8_t>(const float32x4x4_t &qv, const Uni
} // namespace
-CpuQuantizationKernel::CpuQuantizationKernel()
- : _func(nullptr)
-{
-}
-
-void CpuQuantizationKernel::configure(ITensorInfo *src, ITensorInfo *dst)
+void CpuQuantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
- static const std::map<std::string, QuantizationFunctionExecutorPtr> quant_map =
+ static const std::map<std::string, QuantizeFunctionExecutorPtr> quant_map =
{
- { "op_QASYMM8_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<uint8_t, uint8_t> },
- { "op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<uint8_t, int8_t> },
- { "op_QASYMM8_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<uint8_t> },
+ { "op_QASYMM8_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, uint8_t> },
+ { "op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, int8_t> },
+ { "op_QASYMM8_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<uint8_t> },
- { "op_QASYMM8_SIGNED_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<int8_t, uint8_t> },
- { "op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<int8_t, int8_t> },
- { "op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<int8_t> },
+ { "op_QASYMM8_SIGNED_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, uint8_t> },
+ { "op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, int8_t> },
+ { "op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<int8_t> },
- { "op_F32_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<float, uint8_t> },
- { "op_F32_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<float, int8_t> },
- { "op_F32_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<float> },
+ { "op_F32_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float, uint8_t> },
+ { "op_F32_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float, int8_t> },
+ { "op_F32_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float> },
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- { "op_F16_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<float16_t, uint8_t> },
- { "op_F16_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<float16_t, int8_t> },
- { "op_F16_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<float16_t> },
+ { "op_F16_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, uint8_t> },
+ { "op_F16_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, int8_t> },
+ { "op_F16_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float16_t> },
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
};
@@ -156,14 +151,14 @@ void CpuQuantizationKernel::configure(ITensorInfo *src, ITensorInfo *dst)
ICpuKernel::configure(win_config);
}
-Status CpuQuantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
return Status{};
}
template <typename TIn, typename TOut>
-void CpuQuantizationKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+void CpuQuantizeKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
{
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
@@ -206,7 +201,7 @@ void CpuQuantizationKernel::run_quantize_qasymm8(const ITensor *src, ITensor *ds
}
template <typename T>
-void CpuQuantizationKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+void CpuQuantizeKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
{
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
@@ -250,7 +245,7 @@ void CpuQuantizationKernel::run_quantize_qasymm16(const ITensor *src, ITensor *d
input, output);
}
-void CpuQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+void CpuQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
@@ -262,9 +257,9 @@ void CpuQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, c
(this->*_func)(src, dst, window);
}
-const char *CpuQuantizationKernel::name() const
+const char *CpuQuantizeKernel::name() const
{
- return "CpuQuantizationKernel";
+ return "CpuQuantizeKernel";
}
} // namespace kernels
} // namespace cpu
diff --git a/src/core/cpu/kernels/CpuQuantizationKernel.h b/src/core/cpu/kernels/CpuQuantizeKernel.h
index 51d9a4e94f..d3422d3fbd 100644
--- a/src/core/cpu/kernels/CpuQuantizationKernel.h
+++ b/src/core/cpu/kernels/CpuQuantizeKernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H
-#define ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H
+#ifndef ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H
#include "src/core/common/Macros.h"
#include "src/core/cpu/ICpuKernel.h"
@@ -36,14 +36,13 @@ namespace kernels
/** Interface for the quantization layer kernel.
*
* @note The implementation supports only 3D input tensors
- *
*/
-class CpuQuantizationKernel : public ICpuKernel
+class CpuQuantizeKernel : public ICpuKernel
{
public:
/** Default constructor */
- CpuQuantizationKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizationKernel);
+ CpuQuantizeKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizeKernel);
/** Set the input, output.
*
* @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
@@ -51,11 +50,10 @@ public:
*
* @note Output auto initialization is not supported by this kernel
*/
- void configure(ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantizationKernel
+ void configure(const ITensorInfo *src, ITensorInfo *dst);
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
+ * Similar to @ref CpuQuantizeKernel::configure()
*
* @return a status
*/
@@ -66,11 +64,11 @@ public:
const char *name() const override;
private:
- /** Common signature for all the specialised @ref NEQuantizationLayerKernel functions
+ /** Common signature for all the specialised @ref CpuQuantizeKernel functions
*
* @param[in] window Region on which to execute the kernel.
*/
- using QuantizationFunctionExecutorPtr = void (CpuQuantizationKernel::*)(const ITensor *src, ITensor *dst, const Window &window);
+ using QuantizeFunctionExecutorPtr = void (CpuQuantizeKernel::*)(const ITensor *src, ITensor *dst, const Window &window);
/** Function to apply QASYMM8 or QASYMM8_SIGNED quantization on a tensor.
*
* @param[in] window Region on which to execute the kernel.
@@ -84,9 +82,9 @@ private:
template <typename T>
void run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window);
- QuantizationFunctionExecutorPtr _func;
+ QuantizeFunctionExecutorPtr _func{ nullptr };
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
-#endif /*ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClDequantizationKernel.cpp b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
index 6421a08206..f2758b759f 100644
--- a/src/core/gpu/cl/kernels/ClDequantizationKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/gpu/cl/kernels/ClDequantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -29,9 +29,11 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+
#include "support/Cast.h"
#include "support/StringSupport.h"
@@ -59,11 +61,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
}
} // namespace
-ClDequantizationKernel::ClDequantizationKernel()
-{
-}
-
-void ClDequantizationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClDequantizeKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
@@ -115,13 +113,13 @@ void ClDequantizationKernel::configure(const CLCompileContext &compile_context,
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status ClDequantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
return Status{};
}
-void ClDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+void ClDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
diff --git a/src/core/gpu/cl/kernels/ClDequantizationKernel.h b/src/core/gpu/cl/kernels/ClDequantizeKernel.h
index 3ccf90c204..33e0164cc9 100644
--- a/src/core/gpu/cl/kernels/ClDequantizationKernel.h
+++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.h
@@ -21,29 +21,26 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H
-#define ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H
+#ifndef ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H
-#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/common/Macros.h"
#include "src/core/gpu/cl/ClCompileContext.h"
#include "src/core/gpu/cl/IClKernel.h"
namespace arm_compute
{
-class ICLTensor;
-
namespace opencl
{
namespace kernels
{
/** Interface for the dequantization layer kernel. */
-class ClDequantizationKernel : public IClKernel
+class ClDequantizeKernel : public IClKernel
{
public:
/** Default constructor */
- ClDequantizationKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizationKernel);
+ ClDequantizeKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizeKernel);
/** Initialise the kernel's input and output
*
* @param[in] compile_context The compile context to be used.
@@ -51,10 +48,9 @@ public:
* @param[out] dst Destination tensor info. Data types supported: F16/F32.
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClDequantizationKernel
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] dst Output tensor info. Data types supported: F16/F32.
+ * Similar to @ref ClDequantizeKernel::configure()
*
* @return a status
*/
@@ -66,4 +62,4 @@ public:
} // namespace kernels
} // namespace opencl
} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H */
+#endif /* ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClQuantizationKernel.cpp b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
index 9926123529..48d351d536 100644
--- a/src/core/gpu/cl/kernels/ClQuantizationKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -31,8 +31,10 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/WindowHelpers.h"
+
#include "support/Cast.h"
#include "support/StringSupport.h"
@@ -59,11 +61,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
}
} // namespace
-ClQuantizationKernel::ClQuantizationKernel()
-{
-}
-
-void ClQuantizationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClQuantizeKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
@@ -146,13 +144,13 @@ void ClQuantizationKernel::configure(const CLCompileContext &compile_context, IT
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status ClQuantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
return Status{};
}
-void ClQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+void ClQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
diff --git a/src/core/gpu/cl/kernels/ClQuantizationKernel.h b/src/core/gpu/cl/kernels/ClQuantizeKernel.h
index 20822cf9c9..8d37f33032 100644
--- a/src/core/gpu/cl/kernels/ClQuantizationKernel.h
+++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H
-#define ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H
+#ifndef ARM_COMPUTE_CL_QUANTIZE_KERNEL_H
+#define ARM_COMPUTE_CL_QUANTIZE_KERNEL_H
#include "src/core/common/Macros.h"
#include "src/core/gpu/cl/ClCompileContext.h"
@@ -30,8 +30,6 @@
namespace arm_compute
{
-class ICLTensor;
-
namespace opencl
{
namespace kernels
@@ -40,12 +38,12 @@ namespace kernels
*
* @note The implementation supports only 3D input tensors.
*/
-class ClQuantizationKernel : public IClKernel
+class ClQuantizeKernel : public IClKernel
{
public:
/** Default constructor */
- ClQuantizationKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizationKernel);
+ ClQuantizeKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizeKernel);
/** Set the input, output.
*
* @param[in] compile_context The compile context to be used.
@@ -54,11 +52,10 @@ public:
*
* @note Output auto initialization is not supported by this kernel
*/
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClQuantizationKernel
+ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[in] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
+ * Similar to @ref ClQuantizeKernel::configure()
*
* @return a status
*/
@@ -70,4 +67,4 @@ public:
} // namespace kernels
} // namespace opencl
} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H */
+#endif /* ARM_COMPUTE_CL_QUANTIZE_KERNEL_H */
diff --git a/src/runtime/CL/functions/CLDequantizationLayer.cpp b/src/runtime/CL/functions/CLDequantizationLayer.cpp
index e0381f90ae..3b104017e7 100644
--- a/src/runtime/CL/functions/CLDequantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLDequantizationLayer.cpp
@@ -27,15 +27,15 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/CL/ICLKernel.h"
-#include "src/runtime/gpu/cl/operators/ClDequantization.h"
+#include "src/runtime/gpu/cl/operators/ClDequantize.h"
namespace arm_compute
{
struct CLDequantizationLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<opencl::ClDequantization> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClDequantize> op{ nullptr };
};
CLDequantizationLayer::CLDequantizationLayer()
@@ -54,13 +54,13 @@ void CLDequantizationLayer::configure(const CLCompileContext &compile_context, c
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<opencl::ClDequantization>();
+ _impl->op = std::make_unique<opencl::ClDequantize>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLDequantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return opencl::ClDequantization::validate(input, output);
+ return opencl::ClDequantize::validate(input, output);
}
void CLDequantizationLayer::run()
diff --git a/src/runtime/CL/functions/CLQuantizationLayer.cpp b/src/runtime/CL/functions/CLQuantizationLayer.cpp
index 1f6ddb6014..e6451b2eb4 100644
--- a/src/runtime/CL/functions/CLQuantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLQuantizationLayer.cpp
@@ -26,15 +26,15 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/ICLKernel.h"
-#include "src/runtime/gpu/cl/operators/ClQuantization.h"
+#include "src/runtime/gpu/cl/operators/ClQuantize.h"
namespace arm_compute
{
struct CLQuantizationLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<opencl::ClQuantization> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClQuantize> op{ nullptr };
};
CLQuantizationLayer::CLQuantizationLayer()
@@ -53,13 +53,13 @@ void CLQuantizationLayer::configure(const CLCompileContext &compile_context, con
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<opencl::ClQuantization>();
+ _impl->op = std::make_unique<opencl::ClQuantize>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return opencl::ClQuantization::validate(input, output);
+ return opencl::ClQuantize::validate(input, output);
}
void CLQuantizationLayer::run()
diff --git a/src/runtime/NEON/functions/NEDequantizationLayer.cpp b/src/runtime/NEON/functions/NEDequantizationLayer.cpp
index 210fbe0eb2..91e37594af 100644
--- a/src/runtime/NEON/functions/NEDequantizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEDequantizationLayer.cpp
@@ -26,15 +26,15 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/Tensor.h"
-#include "src/runtime/cpu/operators/CpuDequantization.h"
+#include "src/runtime/cpu/operators/CpuDequantize.h"
namespace arm_compute
{
struct NEDequantizationLayer::Impl
{
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- std::unique_ptr<cpu::CpuDequantization> op{ nullptr };
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuDequantize> op{ nullptr };
};
NEDequantizationLayer::NEDequantizationLayer()
@@ -47,13 +47,13 @@ void NEDequantizationLayer::configure(const ITensor *input, ITensor *output)
{
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<cpu::CpuDequantization>();
+ _impl->op = std::make_unique<cpu::CpuDequantize>();
_impl->op->configure(input->info(), output->info());
}
Status NEDequantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return cpu::CpuDequantization::validate(input, output);
+ return cpu::CpuDequantize::validate(input, output);
}
void NEDequantizationLayer::run()
diff --git a/src/runtime/NEON/functions/NEQuantizationLayer.cpp b/src/runtime/NEON/functions/NEQuantizationLayer.cpp
index 58ba68725b..e607917615 100644
--- a/src/runtime/NEON/functions/NEQuantizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEQuantizationLayer.cpp
@@ -26,15 +26,15 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/Tensor.h"
-#include "src/runtime/cpu/operators/CpuQuantization.h"
+#include "src/runtime/cpu/operators/CpuQuantize.h"
namespace arm_compute
{
struct NEQuantizationLayer::Impl
{
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- std::unique_ptr<cpu::CpuQuantization> op{ nullptr };
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuQuantize> op{ nullptr };
};
NEQuantizationLayer::NEQuantizationLayer()
@@ -45,14 +45,14 @@ NEQuantizationLayer::~NEQuantizationLayer() = default;
Status NEQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return cpu::CpuQuantization::validate(input, output);
+ return cpu::CpuQuantize::validate(input, output);
}
void NEQuantizationLayer::configure(const ITensor *input, ITensor *output)
{
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<cpu::CpuQuantization>();
+ _impl->op = std::make_unique<cpu::CpuQuantize>();
_impl->op->configure(input->info(), output->info());
}
diff --git a/src/runtime/cpu/operators/CpuDequantization.cpp b/src/runtime/cpu/operators/CpuDequantize.cpp
index 0a3f602da1..80a2e28aee 100644
--- a/src/runtime/cpu/operators/CpuDequantization.cpp
+++ b/src/runtime/cpu/operators/CpuDequantize.cpp
@@ -21,30 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/cpu/operators/CpuDequantization.h"
+#include "src/runtime/cpu/operators/CpuDequantize.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuDequantizationKernel.h"
+#include "src/core/cpu/kernels/CpuDequantizeKernel.h"
namespace arm_compute
{
namespace cpu
{
-void CpuDequantization::configure(const ITensorInfo *src, ITensorInfo *dst)
+void CpuDequantize::configure(const ITensorInfo *src, ITensorInfo *dst)
{
- auto k = std::make_unique<kernels::CpuDequantizationKernel>();
+ auto k = std::make_unique<kernels::CpuDequantizeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
}
-Status CpuDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- return kernels::CpuDequantizationKernel::validate(src, dst);
+ return kernels::CpuDequantizeKernel::validate(src, dst);
}
-void CpuDequantization::run(ITensorPack &tensors)
+void CpuDequantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
prepare(tensors);
diff --git a/src/runtime/cpu/operators/CpuDequantization.h b/src/runtime/cpu/operators/CpuDequantize.h
index 22f8114149..d1fb9e8d0e 100644
--- a/src/runtime/cpu/operators/CpuDequantization.h
+++ b/src/runtime/cpu/operators/CpuDequantize.h
@@ -21,36 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_DEQUANTIZATION_H
-#define ARM_COMPUTE_CPU_DEQUANTIZATION_H
+#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_H
+#define ARM_COMPUTE_CPU_DEQUANTIZE_H
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "src/core/cpu/ICpuKernel.h"
#include "src/runtime/cpu/ICpuOperator.h"
-#include <memory>
-
namespace arm_compute
{
namespace cpu
{
-/** Basic function to run @ref kernels::CpuDequantizationKernel that dequantizes an input tensor */
-class CpuDequantization : public ICpuOperator
+/** Basic function to run @ref kernels::CpuDequantizeKernel that dequantizes an input tensor */
+class CpuDequantize : public ICpuOperator
{
public:
/** Default Constructor */
- CpuDequantization() = default;
+ CpuDequantize() = default;
/** Configure the kernel.
*
* @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantization
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] dst Destination tensor info. Data type supported: F16/F32.
+ * Similar to @ref CpuDequantize::configure()
*
* @return a status
*/
@@ -61,4 +55,4 @@ public:
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_DEQUANTIZATION_H */
+#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_H */
diff --git a/src/runtime/cpu/operators/CpuQuantization.cpp b/src/runtime/cpu/operators/CpuQuantize.cpp
index ede13850e7..5af7f6343b 100644
--- a/src/runtime/cpu/operators/CpuQuantization.cpp
+++ b/src/runtime/cpu/operators/CpuQuantize.cpp
@@ -22,34 +22,34 @@
* SOFTWARE.
*/
-#include "src/runtime/cpu/operators/CpuQuantization.h"
+#include "src/runtime/cpu/operators/CpuQuantize.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuQuantizationKernel.h"
+#include "src/core/cpu/kernels/CpuQuantizeKernel.h"
namespace arm_compute
{
namespace cpu
{
-Status CpuQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizationKernel::validate(src, dst));
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizeKernel::validate(src, dst));
return Status{};
}
-void CpuQuantization::configure(ITensorInfo *src, ITensorInfo *dst)
+void CpuQuantize::configure(const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
// Configure quantize kernel
- auto k = std::make_unique<kernels::CpuQuantizationKernel>();
+ auto k = std::make_unique<kernels::CpuQuantizeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
}
-void CpuQuantization::run(ITensorPack &tensors)
+void CpuQuantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
diff --git a/src/runtime/cpu/operators/CpuQuantization.h b/src/runtime/cpu/operators/CpuQuantize.h
index 97f0c5fa79..09afffd920 100644
--- a/src/runtime/cpu/operators/CpuQuantization.h
+++ b/src/runtime/cpu/operators/CpuQuantize.h
@@ -21,41 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_QUANTIZATION_H
-#define ARM_COMPUTE_CPU_QUANTIZATION_H
+#ifndef ARM_COMPUTE_CPU_QUANTIZE_H
+#define ARM_COMPUTE_CPU_QUANTIZE_H
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "src/core/cpu/ICpuKernel.h"
#include "src/runtime/cpu/ICpuOperator.h"
-#include <memory>
-
namespace arm_compute
{
namespace cpu
{
-/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) kernels:
- *
- *
- * -# @ref kernels::CpuQuantizationKernel
- *
- */
-class CpuQuantization : public ICpuOperator
+/** Basic function to run @ref kernels::CpuQuantizeKernel that quantizes an input tensor */
+class CpuQuantize : public ICpuOperator
{
public:
/** Default Constructor */
- CpuQuantization() = default;
+ CpuQuantize() = default;
/** Set the input and output tensors.
*
* @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
* @param[out] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16
*/
- void configure(ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantization
+ void configure(const ITensorInfo *src, ITensorInfo *dst);
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16
+ * Similar to @ref CpuQuantize::configure()
*
* @return a status
*/
@@ -66,4 +55,4 @@ public:
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_QUANTIZATION_H */
+#endif /* ARM_COMPUTE_CPU_QUANTIZE_H */
diff --git a/src/runtime/gpu/cl/operators/ClQuantization.cpp b/src/runtime/gpu/cl/operators/ClDequantize.cpp
index 2e753b550e..0c1391bb45 100644
--- a/src/runtime/gpu/cl/operators/ClQuantization.cpp
+++ b/src/runtime/gpu/cl/operators/ClDequantize.cpp
@@ -21,30 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/gpu/cl/operators/ClQuantization.h"
+#include "src/runtime/gpu/cl/operators/ClDequantize.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h"
namespace arm_compute
{
namespace opencl
{
-void ClQuantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClDequantize::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
{
- auto k = std::make_unique<kernels::ClQuantizationKernel>();
+ auto k = std::make_unique<kernels::ClDequantizeKernel>();
k->configure(compile_context, src, dst);
_kernel = std::move(k);
}
-Status ClQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- return kernels::ClQuantizationKernel::validate(src, dst);
+ return kernels::ClDequantizeKernel::validate(src, dst);
}
-void ClQuantization::run(ITensorPack &tensors)
+void ClDequantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
CLScheduler::get().enqueue_op(*_kernel.get(), tensors);
diff --git a/src/runtime/gpu/cl/operators/ClDequantization.h b/src/runtime/gpu/cl/operators/ClDequantize.h
index a696b73d2e..47fad3eeee 100644
--- a/src/runtime/gpu/cl/operators/ClDequantization.h
+++ b/src/runtime/gpu/cl/operators/ClDequantize.h
@@ -21,10 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_DEQUANTIZATION_H
-#define ARM_COMPUTE_CL_DEQUANTIZATION_H
+#ifndef ARM_COMPUTE_CL_DEQUANTIZE_H
+#define ARM_COMPUTE_CL_DEQUANTIZE_H
-#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/gpu/cl/ClCompileContext.h"
#include "src/runtime/gpu/cl/IClOperator.h"
@@ -32,12 +31,12 @@ namespace arm_compute
{
namespace opencl
{
-/** Basic function to run @ref kernels::ClDequantizationKernel that dequantizes an input tensor */
-class ClDequantization : public IClOperator
+/** Basic function to run @ref kernels::ClDequantizeKernel that dequantizes an input tensor */
+class ClDequantize : public IClOperator
{
public:
/** Constructor */
- ClDequantization() = default;
+ ClDequantize() = default;
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -45,10 +44,9 @@ public:
* @param[out] dst Destination tensor info with the same dimensions of @p src. Data type supported: F16/F32.
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] dst Output tensor info. Data type supported: F16/F32.
+ * Similar to @ref ClDequantize::configure()
*
* @return a status
*/
@@ -59,4 +57,4 @@ public:
};
} // namespace opencl
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_DEQUANTIZATION_H */
+#endif /* ARM_COMPUTE_CL_DEQUANTIZE_H */
diff --git a/src/runtime/gpu/cl/operators/ClDequantization.cpp b/src/runtime/gpu/cl/operators/ClQuantize.cpp
index df3203d2e1..92bbb62ba5 100644
--- a/src/runtime/gpu/cl/operators/ClDequantization.cpp
+++ b/src/runtime/gpu/cl/operators/ClQuantize.cpp
@@ -21,31 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/gpu/cl/operators/ClDequantization.h"
+#include "src/runtime/gpu/cl/operators/ClQuantize.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/kernels/ClDequantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h"
namespace arm_compute
{
namespace opencl
{
-void ClDequantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClQuantize::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
{
- auto k = std::make_unique<kernels::ClDequantizationKernel>();
+ auto k = std::make_unique<kernels::ClQuantizeKernel>();
k->configure(compile_context, src, dst);
_kernel = std::move(k);
}
-Status ClDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- return kernels::ClDequantizationKernel::validate(src, dst);
+ return kernels::ClQuantizeKernel::validate(src, dst);
}
-void ClDequantization::run(ITensorPack &tensors)
+void ClQuantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
CLScheduler::get().enqueue_op(*_kernel.get(), tensors);
diff --git a/src/runtime/gpu/cl/operators/ClQuantization.h b/src/runtime/gpu/cl/operators/ClQuantize.h
index d938ff95a0..0b6d2c8cbe 100644
--- a/src/runtime/gpu/cl/operators/ClQuantization.h
+++ b/src/runtime/gpu/cl/operators/ClQuantize.h
@@ -21,10 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_QUANTIZATION_H
-#define ARM_COMPUTE_CL_QUANTIZATION_H
+#ifndef ARM_COMPUTE_CL_QUANTIZE_H
+#define ARM_COMPUTE_CL_QUANTIZE_H
-#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/gpu/cl/ClCompileContext.h"
#include "src/runtime/gpu/cl/IClOperator.h"
@@ -32,15 +31,12 @@ namespace arm_compute
{
namespace opencl
{
-/** Basic function to quantize a tensor. This function calls the following OpenCL kernel:
- *
- * -# @ref kernels::ClQuantizationKernel
- */
-class ClQuantization : public IClOperator
+/** Basic function to run @ref kernels::ClQuantizeKernel that quantizes an input tensor */
+class ClQuantize : public IClOperator
{
public:
/** Constructor */
- ClQuantization() = default;
+ ClQuantize() = default;
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -50,10 +46,9 @@ public:
* @note Output auto initialization is not supported by this function
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/32.
- * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
+ * Similar to @ref ClQuantize::configure()
*
* @return a status
*/
@@ -63,5 +58,5 @@ public:
void run(ITensorPack &tensors) override;
};
} // namespace opencl
-} //namespace arm_compute
-#endif /* ARM_COMPUTE_CL_QUANTIZATION_H */
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_QUANTIZE_H */