aboutsummaryrefslogtreecommitdiff
path: root/arm_compute
diff options
context:
space:
mode:
authorManuel Bottini <manuel.bottini@arm.com>2019-11-13 17:24:43 +0000
committerSiCong Li <sicong.li@arm.com>2019-11-21 13:21:33 +0000
commit3e84bb662f9c6c3f77837640b44c41b7e3403ed4 (patch)
tree7096e2a6506baf62ddea78556413fa193ebec76f /arm_compute
parent5f7dda6f415f8f065f86b9f52ee6c5c85bbaa5e4 (diff)
downloadComputeLibrary-3e84bb662f9c6c3f77837640b44c41b7e3403ed4.tar.gz
COMPMID-2920: NEInstanceNormalization fails on NHWC validations
Improved TensorInfo to accept DataLayout, useful to test the validate functions Removing nightlies tests Moving all vpadds instructions into add.h Change-Id: I96290a6f26272eae865dba48bbc3c6aee4bc0214 Signed-off-by: Manuel Bottini <manuel.bottini@arm.com> Reviewed-on: https://review.mlplatform.org/c/2287 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h6
-rw-r--r--arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h4
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/add.h20
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h1
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/padd.h53
-rw-r--r--arm_compute/core/TensorInfo.h9
6 files changed, 34 insertions, 59 deletions
diff --git a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index bc016d1ceb..00a8a346d9 100644
--- a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -49,7 +49,8 @@ public:
/** Set the input and output tensors.
*
- * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW
+ * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+ * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] gamma (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
* @param[in] beta (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
@@ -59,8 +60,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
*
- * @param[in] input Source tensor info. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
- * Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
* @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
* @param[in] gamma (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
* @param[in] beta (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
diff --git a/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
index 9745d266b8..c34119796d 100644
--- a/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
@@ -53,6 +53,7 @@ public:
/** Set the input and output tensors.
*
* @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW
+ * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] gamma (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
* @param[in] beta (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
@@ -62,8 +63,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEInstanceNormalizationLayer.
*
- * @param[in] input Source tensor info. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
- * Data types supported: F16/F32. Data layout supported: NCHW
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported: NCHW
* @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
* @param[in] gamma (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
* @param[in] beta (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/add.h b/arm_compute/core/NEON/wrapper/intrinsics/add.h
index 1839170485..f0823463af 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/add.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/add.h
@@ -176,6 +176,26 @@ VPADDL_IMPL(int32x4_t, int16x8_t, vpaddlq, s16)
VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
#undef VPADDL_IMPL
+
+// VPADD: Add pairwise
+#define VPADD_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vpadd(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
+VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
+VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
+VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
+VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
+VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
+VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPADD_IMPL
} // namespace wrapper
} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_ADD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 6eae1cf801..d9b8297cb9 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -53,7 +53,6 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/not.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/orr.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pmax.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/padd.h b/arm_compute/core/NEON/wrapper/intrinsics/padd.h
deleted file mode 100644
index 5ee2173df8..0000000000
--- a/arm_compute/core/NEON/wrapper/intrinsics/padd.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_WRAPPER_PADD_H__
-#define __ARM_COMPUTE_WRAPPER_PADD_H__
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace wrapper
-{
-#define VPADD_IMPL(stype, vtype, prefix, postfix) \
- inline vtype vpadd(const vtype &a, const vtype &b) \
- { \
- return prefix##_##postfix(a, b); \
- }
-
-VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
-VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
-VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
-VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
-VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
-VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
-VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-#undef VPADD_IMPL
-} // namespace wrapper
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_WRAPPER_PADD_H__ */
diff --git a/arm_compute/core/TensorInfo.h b/arm_compute/core/TensorInfo.h
index d1a64f59ef..a68f769c51 100644
--- a/arm_compute/core/TensorInfo.h
+++ b/arm_compute/core/TensorInfo.h
@@ -101,6 +101,15 @@ public:
/** Constructor
*
+ * @param[in] tensor_shape It specifies the size for each dimension of the tensor in number of elements.
+ * @param[in] num_channels It indicates the number of channels for each tensor element
+ * @param[in] data_type Data type to use for each tensor element
+ * @param[in] data_layout The data layout setting for the tensor data.
+ */
+ TensorInfo(const TensorShape &tensor_shape, size_t num_channels, DataType data_type, DataLayout data_layout);
+
+ /** Constructor
+ *
* @param[in] tensor_shape It specifies the size for each dimension of the tensor in number of elements.
* @param[in] num_channels It indicates the number of channels for each tensor element
* @param[in] data_type Data type to use for each tensor element