about summary refs log tree commit diff
path: root/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
diff options
context:
space:
mode:
Diffstat (limited to 'arm_compute/runtime/NEON/functions/NEQuantizationLayer.h')
-rw-r--r-- arm_compute/runtime/NEON/functions/NEQuantizationLayer.h | 30
1 file changed, 24 insertions, 6 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 8b0532beea..54ec76b177 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -24,26 +24,37 @@
#ifndef ARM_COMPUTE_NEQUANTIZATIONLAYER_H
#define ARM_COMPUTE_NEQUANTIZATIONLAYER_H
+#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IRuntimeContext.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
-
-#include "arm_compute/core/Types.h"
+#include <memory>
namespace arm_compute
{
class ITensor;
class ITensorInfo;
-/** Basic function to simulate a quantization layer. This function calls the following Neon kernels:
+/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) implementation layers:
*
*
- * -# @ref NEQuantizationLayerKernel
+ * -# @ref cpu::CpuQuantization
*
*/
-class NEQuantizationLayer : public INESimpleFunctionNoBorder
+class NEQuantizationLayer : public IFunction
{
public:
+ NEQuantizationLayer();
+ /** Default Destructor */
+ ~NEQuantizationLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEQuantizationLayer(const NEQuantizationLayer &) = delete;
+ /** Default move constructor */
+ NEQuantizationLayer(NEQuantizationLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEQuantizationLayer &operator=(const NEQuantizationLayer &) = delete;
+ /** Default move assignment operator */
+ NEQuantizationLayer &operator=(NEQuantizationLayer &&) = default;
/** Set the input and output tensors.
*
* @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
@@ -58,6 +69,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEQUANTIZATIONLAYER_H */