Diffstat (limited to 'arm_compute/runtime/NEON/functions')
-rw-r--r-- arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h |  6
-rw-r--r-- arm_compute/runtime/NEON/functions/NEQuantizationLayer.h      | 30
2 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h b/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h
index 90e2307ce8..979b3ba83e 100644
--- a/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h
@@ -44,14 +44,14 @@ class NEComputeAllAnchorsKernel;
/** Basic function to generate proposals for a RPN (Region Proposal Network)
*
- * This function calls the following Neon kernels:
+ * This function calls the following Arm(R) Neon(TM) layers/kernels:
* -# @ref NEComputeAllAnchorsKernel
* -# @ref NEPermute x 2
* -# @ref NEReshapeLayer x 2
* -# @ref NEBoundingBoxTransform
* -# @ref NEPadLayerKernel
* -# @ref NEDequantizationLayerKernel x 2
- * -# @ref NEQuantizationLayerKernel
+ * -# @ref NEQuantizationLayer
* And the following CPP kernels:
* -# @ref CPPBoxWithNonMaximaSuppressionLimit
*/
@@ -113,7 +113,7 @@ private:
// Memory group manager
MemoryGroup _memory_group;
- // Neon kernels
+ // kernels/layers
NEPermute _permute_deltas;
NEReshapeLayer _flatten_deltas;
NEPermute _permute_scores;
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 8b0532beea..54ec76b177 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -24,26 +24,37 @@
#ifndef ARM_COMPUTE_NEQUANTIZATIONLAYER_H
#define ARM_COMPUTE_NEQUANTIZATIONLAYER_H
+#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IRuntimeContext.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
-
-#include "arm_compute/core/Types.h"
+#include <memory>
namespace arm_compute
{
class ITensor;
class ITensorInfo;
-/** Basic function to simulate a quantization layer. This function calls the following Neon kernels:
+/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) implementation layers:
*
*
- * -# @ref NEQuantizationLayerKernel
+ * -# @ref cpu::CpuQuantization
*
*/
-class NEQuantizationLayer : public INESimpleFunctionNoBorder
+class NEQuantizationLayer : public IFunction
{
public:
+ NEQuantizationLayer();
+ /** Default Destructor */
+ ~NEQuantizationLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEQuantizationLayer(const NEQuantizationLayer &) = delete;
+ /** Default move constructor */
+ NEQuantizationLayer(NEQuantizationLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEQuantizationLayer &operator=(const NEQuantizationLayer &) = delete;
+ /** Default move assignment operator */
+ NEQuantizationLayer &operator=(NEQuantizationLayer &&) = default;
/** Set the input and output tensors.
*
* @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
@@ -58,6 +69,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEQUANTIZATIONLAYER_H */
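
For reference, a minimal usage sketch of the reworked NEQuantizationLayer, assuming the usual Arm Compute Library Tensor/TensorInfo setup and the configure(const ITensor *, ITensor *) signature documented above; shapes, quantization parameters and variable names (src, dst) are illustrative, not taken from the patch:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // F32 source tensor and QASYMM8 destination tensor (shape and scale/offset are illustrative).
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));

    // Check the configuration is supported, then configure before allocating backing memory.
    if(NEQuantizationLayer::validate(src.info(), dst.info()).error_code() != ErrorCode::OK)
    {
        return 1; // unsupported configuration
    }
    NEQuantizationLayer quant;
    quant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with data ...
    quant.run(); // dispatches to the Arm(R) Neon(TM) quantization implementation
    return 0;
}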
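
The header alone does not show how the new Impl struct is wired to cpu::CpuQuantization. A plausible sketch of the matching source file, assuming the operator follows the same interface as the other ported cpu:: operators (configure() on ITensorInfo pointers, run() on an ITensorPack) and assuming the operator header path used at the time of this patch:

#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"

#include "arm_compute/core/ITensorPack.h"
#include "src/runtime/cpu/operators/CpuQuantization.h" // assumed location of the cpu operator

namespace arm_compute
{
struct NEQuantizationLayer::Impl
{
    const ITensor                        *src{ nullptr };
    ITensor                              *dst{ nullptr };
    std::unique_ptr<cpu::CpuQuantization> op{ nullptr };
};

NEQuantizationLayer::NEQuantizationLayer()
    : _impl(std::make_unique<Impl>())
{
}
NEQuantizationLayer::~NEQuantizationLayer() = default;

Status NEQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    // Forward validation to the backing cpu operator (assumed API).
    return cpu::CpuQuantization::validate(input, output);
}

void NEQuantizationLayer::configure(const ITensor *input, ITensor *output)
{
    // Keep the tensor pointers and configure the operator on their metadata only.
    _impl->src = input;
    _impl->dst = output;
    _impl->op  = std::make_unique<cpu::CpuQuantization>();
    _impl->op->configure(input->info(), output->info());
}

void NEQuantizationLayer::run()
{
    // Bind the runtime tensors to the stateless operator on every invocation.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, _impl->src);
    pack.add_tensor(TensorType::ACL_DST, _impl->dst);
    _impl->op->run(pack);
}
} // namespace arm_compute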