diff options
Diffstat (limited to 'arm_compute/runtime/NEON/functions/NEQuantizationLayer.h')
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEQuantizationLayer.h | 44 |
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h index 8b0532beea..7bf97e28a5 100644 --- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h +++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h @@ -24,28 +24,45 @@ #ifndef ARM_COMPUTE_NEQUANTIZATIONLAYER_H #define ARM_COMPUTE_NEQUANTIZATIONLAYER_H +#include "arm_compute/core/Types.h" #include "arm_compute/runtime/IFunction.h" +#include "arm_compute/runtime/IRuntimeContext.h" -#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" - -#include "arm_compute/core/Types.h" +#include <memory> namespace arm_compute { class ITensor; class ITensorInfo; -/** Basic function to simulate a quantization layer. This function calls the following Neon kernels: - * - * - * -# @ref NEQuantizationLayerKernel - * - */ -class NEQuantizationLayer : public INESimpleFunctionNoBorder +/** Basic function to run a quantization layer using @ref cpu::CpuQuantize */ +class NEQuantizationLayer : public IFunction { public: + NEQuantizationLayer(); + /** Default Destructor */ + ~NEQuantizationLayer(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEQuantizationLayer(const NEQuantizationLayer &) = delete; + /** Default move constructor */ + NEQuantizationLayer(NEQuantizationLayer &&) = default; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEQuantizationLayer &operator=(const NEQuantizationLayer &) = delete; + /** Default move assignment operator */ + NEQuantizationLayer &operator=(NEQuantizationLayer &&) = default; /** Set the input and output tensors. 
* + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:------------------|:--------------------------------------| + * |QASYMM8 |QASYMM8, QASYMM8_SIGNED, QASYMM16 | + * |QASYMM8_SIGNED |QASYMM8, QASYMM8_SIGNED, QASYMM16 | + * |F16 |QASYMM8, QASYMM8_SIGNED, QASYMM16 | + * |F32 |QASYMM8, QASYMM8_SIGNED, QASYMM16 | + * * @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16 */ @@ -58,6 +75,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *output); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; } // namespace arm_compute #endif /* ARM_COMPUTE_NEQUANTIZATIONLAYER_H */ |