about summary refs log tree commit diff
path: root/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
diff options
context:
space:
mode:
Diffstat (limited to 'arm_compute/runtime/CL/functions/CLDequantizationLayer.h')
-rw-r--r--  arm_compute/runtime/CL/functions/CLDequantizationLayer.h  44
1 files changed, 39 insertions, 5 deletions
diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index c0a0fcd988..b01fe9eb14 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,21 +24,48 @@
#ifndef ARM_COMPUTE_CLDEQUANTIZATIONLAYER_H
#define ARM_COMPUTE_CLDEQUANTIZATIONLAYER_H
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
namespace arm_compute
{
// Forward declarations
+class CLCompileContext;
class ICLTensor;
+class ITensorInfo;
-/** Basic function to run @ref CLDequantizationLayerKernel that dequantizes an input tensor */
-class CLDequantizationLayer : public ICLSimpleFunction
+/** Basic function to run @ref opencl::ClDequantize that dequantizes an input tensor */
+class CLDequantizationLayer : public IFunction
{
public:
+ /** Default Constructor */
+ CLDequantizationLayer();
+ /** Default Destructor */
+ ~CLDequantizationLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDequantizationLayer(const CLDequantizationLayer &) = delete;
+ /** Default move constructor */
+ CLDequantizationLayer(CLDequantizationLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDequantizationLayer &operator=(const CLDequantizationLayer &) = delete;
+ /** Default move assignment operator */
+ CLDequantizationLayer &operator=(CLDequantizationLayer &&) = default;
/** Set the input and output tensors.
*
+ * Valid data layouts:
+ * - All
+ *
+ * Valid data type configurations:
+ * |src |dst |
+ * |:------------------|:---------|
+ * |QASYMM8 |F16, F32 |
+ * |QASYMM8_SIGNED |F16, F32 |
+ * |QSYMM8_PER_CHANNEL |F16, F32 |
+ * |QSYMM8 |F16, F32 |
+ * |QSYMM16 |F16, F32 |
+ *
* @param[in] input Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.
* Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
@@ -60,6 +87,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLDEQUANTIZATIONLAYER_H */