diff options
Diffstat (limited to 'src/runtime')
-rw-r--r-- | src/runtime/CL/functions/CLDequantizationLayer.cpp | 12 | ||||
-rw-r--r-- | src/runtime/CL/functions/CLQuantizationLayer.cpp | 12 | ||||
-rw-r--r-- | src/runtime/NEON/functions/NEDequantizationLayer.cpp | 12 | ||||
-rw-r--r-- | src/runtime/NEON/functions/NEQuantizationLayer.cpp | 12 | ||||
-rw-r--r-- | src/runtime/cpu/operators/CpuDequantize.cpp (renamed from src/runtime/cpu/operators/CpuDequantization.cpp) | 14 | ||||
-rw-r--r-- | src/runtime/cpu/operators/CpuDequantize.h (renamed from src/runtime/cpu/operators/CpuDequantization.h) | 22 | ||||
-rw-r--r-- | src/runtime/cpu/operators/CpuQuantize.cpp (renamed from src/runtime/cpu/operators/CpuQuantization.cpp) | 14 | ||||
-rw-r--r-- | src/runtime/cpu/operators/CpuQuantize.h (renamed from src/runtime/cpu/operators/CpuQuantization.h) | 29 | ||||
-rw-r--r-- | src/runtime/gpu/cl/operators/ClDequantize.cpp (renamed from src/runtime/gpu/cl/operators/ClQuantization.cpp) | 14 | ||||
-rw-r--r-- | src/runtime/gpu/cl/operators/ClDequantize.h (renamed from src/runtime/gpu/cl/operators/ClDequantization.h) | 18 | ||||
-rw-r--r-- | src/runtime/gpu/cl/operators/ClQuantize.cpp (renamed from src/runtime/gpu/cl/operators/ClDequantization.cpp) | 15 | ||||
-rw-r--r-- | src/runtime/gpu/cl/operators/ClQuantize.h (renamed from src/runtime/gpu/cl/operators/ClQuantization.h) | 23 |
12 files changed, 86 insertions, 111 deletions
diff --git a/src/runtime/CL/functions/CLDequantizationLayer.cpp b/src/runtime/CL/functions/CLDequantizationLayer.cpp index e0381f90ae..3b104017e7 100644 --- a/src/runtime/CL/functions/CLDequantizationLayer.cpp +++ b/src/runtime/CL/functions/CLDequantizationLayer.cpp @@ -27,15 +27,15 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/KernelDescriptors.h" #include "src/core/CL/ICLKernel.h" -#include "src/runtime/gpu/cl/operators/ClDequantization.h" +#include "src/runtime/gpu/cl/operators/ClDequantize.h" namespace arm_compute { struct CLDequantizationLayer::Impl { - const ICLTensor *src{ nullptr }; - ICLTensor *dst{ nullptr }; - std::unique_ptr<opencl::ClDequantization> op{ nullptr }; + const ICLTensor *src{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<opencl::ClDequantize> op{ nullptr }; }; CLDequantizationLayer::CLDequantizationLayer() @@ -54,13 +54,13 @@ void CLDequantizationLayer::configure(const CLCompileContext &compile_context, c _impl->src = input; _impl->dst = output; - _impl->op = std::make_unique<opencl::ClDequantization>(); + _impl->op = std::make_unique<opencl::ClDequantize>(); _impl->op->configure(compile_context, input->info(), output->info()); } Status CLDequantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output) { - return opencl::ClDequantization::validate(input, output); + return opencl::ClDequantize::validate(input, output); } void CLDequantizationLayer::run() diff --git a/src/runtime/CL/functions/CLQuantizationLayer.cpp b/src/runtime/CL/functions/CLQuantizationLayer.cpp index 1f6ddb6014..e6451b2eb4 100644 --- a/src/runtime/CL/functions/CLQuantizationLayer.cpp +++ b/src/runtime/CL/functions/CLQuantizationLayer.cpp @@ -26,15 +26,15 @@ #include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/CL/ICLTensor.h" #include "src/core/CL/ICLKernel.h" -#include "src/runtime/gpu/cl/operators/ClQuantization.h" +#include "src/runtime/gpu/cl/operators/ClQuantize.h" namespace arm_compute { 
struct CLQuantizationLayer::Impl { - const ICLTensor *src{ nullptr }; - ICLTensor *dst{ nullptr }; - std::unique_ptr<opencl::ClQuantization> op{ nullptr }; + const ICLTensor *src{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<opencl::ClQuantize> op{ nullptr }; }; CLQuantizationLayer::CLQuantizationLayer() @@ -53,13 +53,13 @@ void CLQuantizationLayer::configure(const CLCompileContext &compile_context, con _impl->src = input; _impl->dst = output; - _impl->op = std::make_unique<opencl::ClQuantization>(); + _impl->op = std::make_unique<opencl::ClQuantize>(); _impl->op->configure(compile_context, input->info(), output->info()); } Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output) { - return opencl::ClQuantization::validate(input, output); + return opencl::ClQuantize::validate(input, output); } void CLQuantizationLayer::run() diff --git a/src/runtime/NEON/functions/NEDequantizationLayer.cpp b/src/runtime/NEON/functions/NEDequantizationLayer.cpp index 210fbe0eb2..91e37594af 100644 --- a/src/runtime/NEON/functions/NEDequantizationLayer.cpp +++ b/src/runtime/NEON/functions/NEDequantizationLayer.cpp @@ -26,15 +26,15 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/runtime/Tensor.h" -#include "src/runtime/cpu/operators/CpuDequantization.h" +#include "src/runtime/cpu/operators/CpuDequantize.h" namespace arm_compute { struct NEDequantizationLayer::Impl { - const ITensor *src{ nullptr }; - ITensor *dst{ nullptr }; - std::unique_ptr<cpu::CpuDequantization> op{ nullptr }; + const ITensor *src{ nullptr }; + ITensor *dst{ nullptr }; + std::unique_ptr<cpu::CpuDequantize> op{ nullptr }; }; NEDequantizationLayer::NEDequantizationLayer() @@ -47,13 +47,13 @@ void NEDequantizationLayer::configure(const ITensor *input, ITensor *output) { _impl->src = input; _impl->dst = output; - _impl->op = std::make_unique<cpu::CpuDequantization>(); + _impl->op = std::make_unique<cpu::CpuDequantize>(); _impl->op->configure(input->info(), 
output->info()); } Status NEDequantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output) { - return cpu::CpuDequantization::validate(input, output); + return cpu::CpuDequantize::validate(input, output); } void NEDequantizationLayer::run() diff --git a/src/runtime/NEON/functions/NEQuantizationLayer.cpp b/src/runtime/NEON/functions/NEQuantizationLayer.cpp index 58ba68725b..e607917615 100644 --- a/src/runtime/NEON/functions/NEQuantizationLayer.cpp +++ b/src/runtime/NEON/functions/NEQuantizationLayer.cpp @@ -26,15 +26,15 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/runtime/Tensor.h" -#include "src/runtime/cpu/operators/CpuQuantization.h" +#include "src/runtime/cpu/operators/CpuQuantize.h" namespace arm_compute { struct NEQuantizationLayer::Impl { - const ITensor *src{ nullptr }; - ITensor *dst{ nullptr }; - std::unique_ptr<cpu::CpuQuantization> op{ nullptr }; + const ITensor *src{ nullptr }; + ITensor *dst{ nullptr }; + std::unique_ptr<cpu::CpuQuantize> op{ nullptr }; }; NEQuantizationLayer::NEQuantizationLayer() @@ -45,14 +45,14 @@ NEQuantizationLayer::~NEQuantizationLayer() = default; Status NEQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output) { - return cpu::CpuQuantization::validate(input, output); + return cpu::CpuQuantize::validate(input, output); } void NEQuantizationLayer::configure(const ITensor *input, ITensor *output) { _impl->src = input; _impl->dst = output; - _impl->op = std::make_unique<cpu::CpuQuantization>(); + _impl->op = std::make_unique<cpu::CpuQuantize>(); _impl->op->configure(input->info(), output->info()); } diff --git a/src/runtime/cpu/operators/CpuDequantization.cpp b/src/runtime/cpu/operators/CpuDequantize.cpp index 0a3f602da1..80a2e28aee 100644 --- a/src/runtime/cpu/operators/CpuDequantization.cpp +++ b/src/runtime/cpu/operators/CpuDequantize.cpp @@ -21,30 +21,30 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "src/runtime/cpu/operators/CpuDequantization.h" +#include "src/runtime/cpu/operators/CpuDequantize.h" #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Validate.h" #include "arm_compute/runtime/NEON/NEScheduler.h" -#include "src/core/cpu/kernels/CpuDequantizationKernel.h" +#include "src/core/cpu/kernels/CpuDequantizeKernel.h" namespace arm_compute { namespace cpu { -void CpuDequantization::configure(const ITensorInfo *src, ITensorInfo *dst) +void CpuDequantize::configure(const ITensorInfo *src, ITensorInfo *dst) { - auto k = std::make_unique<kernels::CpuDequantizationKernel>(); + auto k = std::make_unique<kernels::CpuDequantizeKernel>(); k->configure(src, dst); _kernel = std::move(k); } -Status CpuDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status CpuDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst) { - return kernels::CpuDequantizationKernel::validate(src, dst); + return kernels::CpuDequantizeKernel::validate(src, dst); } -void CpuDequantization::run(ITensorPack &tensors) +void CpuDequantize::run(ITensorPack &tensors) { ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided"); prepare(tensors); diff --git a/src/runtime/cpu/operators/CpuDequantization.h b/src/runtime/cpu/operators/CpuDequantize.h index 22f8114149..d1fb9e8d0e 100644 --- a/src/runtime/cpu/operators/CpuDequantization.h +++ b/src/runtime/cpu/operators/CpuDequantize.h @@ -21,36 +21,30 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CPU_DEQUANTIZATION_H -#define ARM_COMPUTE_CPU_DEQUANTIZATION_H +#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_H +#define ARM_COMPUTE_CPU_DEQUANTIZE_H -#include "arm_compute/core/ITensorInfo.h" -#include "arm_compute/core/experimental/Types.h" -#include "src/core/cpu/ICpuKernel.h" #include "src/runtime/cpu/ICpuOperator.h" -#include <memory> - namespace arm_compute { namespace cpu { -/** Basic function to run @ref kernels::CpuDequantizationKernel that dequantizes an input tensor */ -class CpuDequantization : public ICpuOperator +/** Basic function to run @ref kernels::CpuDequantizeKernel that dequantizes an input tensor */ +class CpuDequantize : public ICpuOperator { public: /** Default Constructor */ - CpuDequantization() = default; + CpuDequantize() = default; /** Configure the kernel. * * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32. */ void configure(const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantization + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. - * @param[in] dst Destination tensor info. Data type supported: F16/F32. + * Similar to @ref CpuDequantize::configure() * * @return a status */ @@ -61,4 +55,4 @@ public: }; } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_DEQUANTIZATION_H */ +#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_H */ diff --git a/src/runtime/cpu/operators/CpuQuantization.cpp b/src/runtime/cpu/operators/CpuQuantize.cpp index ede13850e7..5af7f6343b 100644 --- a/src/runtime/cpu/operators/CpuQuantization.cpp +++ b/src/runtime/cpu/operators/CpuQuantize.cpp @@ -22,34 +22,34 @@ * SOFTWARE. 
*/ -#include "src/runtime/cpu/operators/CpuQuantization.h" +#include "src/runtime/cpu/operators/CpuQuantize.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" #include "arm_compute/runtime/NEON/NEScheduler.h" -#include "src/core/cpu/kernels/CpuQuantizationKernel.h" +#include "src/core/cpu/kernels/CpuQuantizeKernel.h" namespace arm_compute { namespace cpu { -Status CpuQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status CpuQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst) { - ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizationKernel::validate(src, dst)); + ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizeKernel::validate(src, dst)); return Status{}; } -void CpuQuantization::configure(ITensorInfo *src, ITensorInfo *dst) +void CpuQuantize::configure(const ITensorInfo *src, ITensorInfo *dst) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); // Configure quantize kernel - auto k = std::make_unique<kernels::CpuQuantizationKernel>(); + auto k = std::make_unique<kernels::CpuQuantizeKernel>(); k->configure(src, dst); _kernel = std::move(k); } -void CpuQuantization::run(ITensorPack &tensors) +void CpuQuantize::run(ITensorPack &tensors) { ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided"); NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors); diff --git a/src/runtime/cpu/operators/CpuQuantization.h b/src/runtime/cpu/operators/CpuQuantize.h index 97f0c5fa79..09afffd920 100644 --- a/src/runtime/cpu/operators/CpuQuantization.h +++ b/src/runtime/cpu/operators/CpuQuantize.h @@ -21,41 +21,30 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CPU_QUANTIZATION_H -#define ARM_COMPUTE_CPU_QUANTIZATION_H +#ifndef ARM_COMPUTE_CPU_QUANTIZE_H +#define ARM_COMPUTE_CPU_QUANTIZE_H -#include "arm_compute/core/ITensorInfo.h" -#include "arm_compute/core/experimental/Types.h" -#include "src/core/cpu/ICpuKernel.h" #include "src/runtime/cpu/ICpuOperator.h" -#include <memory> - namespace arm_compute { namespace cpu { -/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) kernels: - * - * - * -# @ref kernels::CpuQuantizationKernel - * - */ -class CpuQuantization : public ICpuOperator +/** Basic function to run @ref kernels::CpuQuantizeKernel that quantizes an input tensor */ +class CpuQuantize : public ICpuOperator { public: /** Default Constructor */ - CpuQuantization() = default; + CpuQuantize() = default; /** Set the input and output tensors. * * @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. * @param[out] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16 */ - void configure(ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantization + void configure(const ITensorInfo *src, ITensorInfo *dst); + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. - * @param[in] dst Output tensor info. 
Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16 + * Similar to @ref CpuQuantize::configure() * * @return a status */ @@ -66,4 +55,4 @@ public: }; } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_QUANTIZATION_H */ +#endif /* ARM_COMPUTE_CPU_QUANTIZE_H */ diff --git a/src/runtime/gpu/cl/operators/ClQuantization.cpp b/src/runtime/gpu/cl/operators/ClDequantize.cpp index 2e753b550e..0c1391bb45 100644 --- a/src/runtime/gpu/cl/operators/ClQuantization.cpp +++ b/src/runtime/gpu/cl/operators/ClDequantize.cpp @@ -21,30 +21,30 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "src/runtime/gpu/cl/operators/ClQuantization.h" +#include "src/runtime/gpu/cl/operators/ClDequantize.h" #include "arm_compute/core/Error.h" #include "arm_compute/runtime/CL/CLScheduler.h" #include "src/core/gpu/cl/ClCompileContext.h" -#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h" +#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h" namespace arm_compute { namespace opencl { -void ClQuantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) +void ClDequantize::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) { - auto k = std::make_unique<kernels::ClQuantizationKernel>(); + auto k = std::make_unique<kernels::ClDequantizeKernel>(); k->configure(compile_context, src, dst); _kernel = std::move(k); } -Status ClQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status ClDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst) { - return kernels::ClQuantizationKernel::validate(src, dst); + return kernels::ClDequantizeKernel::validate(src, dst); } -void ClQuantization::run(ITensorPack &tensors) +void ClDequantize::run(ITensorPack &tensors) { ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided"); CLScheduler::get().enqueue_op(*_kernel.get(), tensors); diff --git 
a/src/runtime/gpu/cl/operators/ClDequantization.h b/src/runtime/gpu/cl/operators/ClDequantize.h index a696b73d2e..47fad3eeee 100644 --- a/src/runtime/gpu/cl/operators/ClDequantization.h +++ b/src/runtime/gpu/cl/operators/ClDequantize.h @@ -21,10 +21,9 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_DEQUANTIZATION_H -#define ARM_COMPUTE_CL_DEQUANTIZATION_H +#ifndef ARM_COMPUTE_CL_DEQUANTIZE_H +#define ARM_COMPUTE_CL_DEQUANTIZE_H -#include "arm_compute/core/KernelDescriptors.h" #include "src/core/gpu/cl/ClCompileContext.h" #include "src/runtime/gpu/cl/IClOperator.h" @@ -32,12 +31,12 @@ namespace arm_compute { namespace opencl { -/** Basic function to run @ref kernels::ClDequantizationKernel that dequantizes an input tensor */ -class ClDequantization : public IClOperator +/** Basic function to run @ref kernels::ClDequantizeKernel that dequantizes an input tensor */ +class ClDequantize : public IClOperator { public: /** Constructor */ - ClDequantization() = default; + ClDequantize() = default; /** Set the input and output tensors. * * @param[in] compile_context The compile context to be used. @@ -45,10 +44,9 @@ public: * @param[out] dst Destination tensor info with the same dimensions of @p src. Data type supported: F16/F32. */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. - * @param[in] dst Output tensor info. Data type supported: F16/F32. 
+ * Similar to @ref ClDequantize::configure() * * @return a status */ @@ -59,4 +57,4 @@ public: }; } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_DEQUANTIZATION_H */ +#endif /* ARM_COMPUTE_CL_DEQUANTIZE_H */ diff --git a/src/runtime/gpu/cl/operators/ClDequantization.cpp b/src/runtime/gpu/cl/operators/ClQuantize.cpp index df3203d2e1..92bbb62ba5 100644 --- a/src/runtime/gpu/cl/operators/ClDequantization.cpp +++ b/src/runtime/gpu/cl/operators/ClQuantize.cpp @@ -21,31 +21,30 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "src/runtime/gpu/cl/operators/ClDequantization.h" +#include "src/runtime/gpu/cl/operators/ClQuantize.h" #include "arm_compute/core/Error.h" #include "arm_compute/runtime/CL/CLScheduler.h" -#include "src/core/CL/kernels/CLFillBorderKernel.h" #include "src/core/gpu/cl/ClCompileContext.h" -#include "src/core/gpu/cl/kernels/ClDequantizationKernel.h" +#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h" namespace arm_compute { namespace opencl { -void ClDequantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) +void ClQuantize::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) { - auto k = std::make_unique<kernels::ClDequantizationKernel>(); + auto k = std::make_unique<kernels::ClQuantizeKernel>(); k->configure(compile_context, src, dst); _kernel = std::move(k); } -Status ClDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status ClQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst) { - return kernels::ClDequantizationKernel::validate(src, dst); + return kernels::ClQuantizeKernel::validate(src, dst); } -void ClDequantization::run(ITensorPack &tensors) +void ClQuantize::run(ITensorPack &tensors) { ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided"); CLScheduler::get().enqueue_op(*_kernel.get(), tensors); diff --git 
a/src/runtime/gpu/cl/operators/ClQuantization.h b/src/runtime/gpu/cl/operators/ClQuantize.h index d938ff95a0..0b6d2c8cbe 100644 --- a/src/runtime/gpu/cl/operators/ClQuantization.h +++ b/src/runtime/gpu/cl/operators/ClQuantize.h @@ -21,10 +21,9 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_QUANTIZATION_H -#define ARM_COMPUTE_CL_QUANTIZATION_H +#ifndef ARM_COMPUTE_CL_QUANTIZE_H +#define ARM_COMPUTE_CL_QUANTIZE_H -#include "arm_compute/core/KernelDescriptors.h" #include "src/core/gpu/cl/ClCompileContext.h" #include "src/runtime/gpu/cl/IClOperator.h" @@ -32,15 +31,12 @@ namespace arm_compute { namespace opencl { -/** Basic function to quantize a tensor. This function calls the following OpenCL kernel: - * - * -# @ref kernels::ClQuantizationKernel - */ -class ClQuantization : public IClOperator +/** Basic function to run @ref kernels::ClQuantizeKernel that quantizes an input tensor */ +class ClQuantize : public IClOperator { public: /** Constructor */ - ClQuantization() = default; + ClQuantize() = default; /** Set the input and output tensors. * * @param[in] compile_context The compile context to be used. @@ -50,10 +46,9 @@ public: * @note Output auto initialization is not supported by this function */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/32. - * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16. 
+ * Similar to @ref ClQuantize::configure() * * @return a status */ @@ -63,5 +58,5 @@ public: void run(ITensorPack &tensors) override; }; } // namespace opencl -} //namespace arm_compute -#endif /* ARM_COMPUTE_CL_QUANTIZATION_H */ +} // namespace arm_compute +#endif /* ARM_COMPUTE_CL_QUANTIZE_H */ |