Diffstat (limited to 'arm_compute/runtime/NEON/functions/NEROIAlignLayer.h')
-rw-r--r--  arm_compute/runtime/NEON/functions/NEROIAlignLayer.h  39
1 file changed, 25 insertions, 14 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h b/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h
index 04a24ac7ec..b06ebe899d 100644
--- a/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 ARM Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,27 +24,35 @@
#ifndef ARM_COMPUTE_NEROIALIGNLAYER_H
#define ARM_COMPUTE_NEROIALIGNLAYER_H
-#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
namespace arm_compute
{
class ITensor;
+class ITensorInfo;
-/** Basic function to run @ref NEROIAlignLayerKernel.
- *
- * This function calls the following NEON kernels:
- * -# @ref NEROIAlignLayerKernel
- *
- */
-class NEROIAlignLayer : public INESimpleFunction
+/** Basic function to run @ref NEROIAlignLayerKernel. */
+class NEROIAlignLayer : public INESimpleFunctionNoBorder
{
public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * Valid data layouts:
+ * - All
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |dst |
+ * |:--------------|:--------------|:--------------|
+ * |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |
+ * |QASYMM8 |QASYMM16 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM16 |QASYMM8_SIGNED |
+ *
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] rois ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
* as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ].
- * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8, otherwise same as @p input
+ * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8/QASYMM8_SIGNED, otherwise same as @p input
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
*
@@ -56,8 +64,8 @@ public:
void configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref NEROIAlignLayerKernel
*
- * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
- * @param[in] rois ROIs tensor info. Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8,
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] rois ROIs tensor info. Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8/QASYMM8_SIGNED,
* otherwise same as @p input
* @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
@@ -69,7 +77,10 @@ public:
*
* @return a Status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *rois,
+ ITensorInfo *output,
+ const ROIPoolingLayerInfo &pool_info);
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEROIALIGNLAYER_H */
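
For context, a minimal usage sketch of the updated API (not part of this patch; shapes, spatial scale, and sampling ratio are hypothetical values chosen for illustration). It follows the pattern documented above: validate() is checked against the tensor infos, configure() wires the tensors, and run() executes the kernel. For an F32 input the ROIs tensor uses the same data type as the input; the QASYMM16 rois requirement only applies to QASYMM8/QASYMM8_SIGNED inputs.

// Minimal NEROIAlignLayer usage sketch (hypothetical shapes and parameters).
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEROIAlignLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Feature map: 64x64 spatial, 256 channels, batch of 1 (W, H, C, N).
    Tensor input;
    input.allocator()->init(TensorInfo(TensorShape(64U, 64U, 256U, 1U), 1, DataType::F32));

    // ROIs: 2D tensor of size [5, N] holding [batch_id, x1, y1, x2, y2] per ROI.
    constexpr unsigned int num_rois = 16;
    Tensor rois;
    rois.allocator()->init(TensorInfo(TensorShape(5U, num_rois), 1, DataType::F32));

    // Output: one pooled_w x pooled_h x C feature map per ROI.
    const unsigned int pooled_w = 7, pooled_h = 7;
    Tensor output;
    output.allocator()->init(TensorInfo(TensorShape(pooled_w, pooled_h, 256U, num_rois), 1, DataType::F32));

    // Pooling description: pooled size, spatial scale of the feature map, sampling ratio.
    const ROIPoolingLayerInfo pool_info(pooled_w, pooled_h, 0.0625f /* spatial_scale */, 2 /* sampling_ratio */);

    // validate() checks the configuration without allocating or configuring anything.
    const Status status = NEROIAlignLayer::validate(input.info(), rois.info(), output.info(), pool_info);
    if(!bool(status))
    {
        return 1;
    }

    NEROIAlignLayer roi_align;
    roi_align.configure(&input, &rois, &output, pool_info);

    // Allocate backing memory, fill input/rois with real data, then run.
    input.allocator()->allocate();
    rois.allocator()->allocate();
    output.allocator()->allocate();
    roi_align.run();

    return 0;
}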