aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/core
diff options
context:
space:
mode:
Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/NEON/NEKernels.h                              1
-rw-r--r--  arm_compute/core/NEON/kernels/NEChannelShuffleLayerKernel.h   80
-rw-r--r--  arm_compute/core/Validate.h                                   52
3 files changed, 133 insertions, 0 deletions
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index cb2e851b66..76eb5cb5f2 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -39,6 +39,7 @@
#include "arm_compute/core/NEON/kernels/NECannyEdgeKernel.h"
#include "arm_compute/core/NEON/kernels/NEChannelCombineKernel.h"
#include "arm_compute/core/NEON/kernels/NEChannelExtractKernel.h"
+#include "arm_compute/core/NEON/kernels/NEChannelShuffleLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NECol2ImKernel.h"
#include "arm_compute/core/NEON/kernels/NEColorConvertKernel.h"
#include "arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEChannelShuffleLayerKernel.h b/arm_compute/core/NEON/kernels/NEChannelShuffleLayerKernel.h
new file mode 100644
index 0000000000..e9ef2d4a51
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEChannelShuffleLayerKernel.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NECHANNELSHUFFLELAYERKERNEL_H__
+#define __ARM_COMPUTE_NECHANNELSHUFFLELAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Interface for the channel shuffle kernel */
+class NEChannelShuffleLayerKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEChannelShuffleLayerKernel";
+ }
+ /** Default constructor */
+ NEChannelShuffleLayerKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEChannelShuffleLayerKernel(const NEChannelShuffleLayerKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEChannelShuffleLayerKernel &operator=(const NEChannelShuffleLayerKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEChannelShuffleLayerKernel(NEChannelShuffleLayerKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEChannelShuffleLayerKernel &operator=(NEChannelShuffleLayerKernel &&) = default;
+ /** Default destructor */
+ ~NEChannelShuffleLayerKernel() = default;
+ /** Configure function's inputs and outputs.
+ *
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[out] output Output tensor. Data type supported: Same as @p input
+ * @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
+ */
+ void configure(const ITensor *input, ITensor *output, unsigned int num_groups);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEChannelShuffleLayerKernel
+ *
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in]  output Output tensor. Data type supported: Same as @p input
+ * @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_groups);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ const ITensor *_input;
+ ITensor *_output;
+ unsigned int _num_groups;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NECHANNELSHUFFLELAYERKERNEL_H__ */
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 918c8e5fc3..dab4221a3b 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -693,6 +693,58 @@ inline arm_compute::Status error_on_data_type_not_in(const char *function, const
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
+/** Return an error if the data layout of the passed tensor info does not match any of the data layouts provided.
+ *
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info Tensor info to validate.
+ * @param[in] dl First data layout allowed.
+ * @param[in] dls (Optional) Further allowed data layouts.
+ *
+ * @return Status
+ */
+template <typename T, typename... Ts>
+inline arm_compute::Status error_on_data_layout_not_in(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info, T &&dl, Ts &&... dls)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_info == nullptr, function, file, line);
+
+ const DataLayout &tensor_dl = tensor_info->data_layout(); //NOLINT
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_dl == DataLayout::UNKNOWN, function, file, line);
+
+ const std::array<T, sizeof...(Ts)> dls_array{ { std::forward<Ts>(dls)... } };
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(tensor_dl != dl && std::none_of(dls_array.begin(), dls_array.end(), [&](const T & l)
+ {
+ return l == tensor_dl;
+ }),
+ function, file, line, "ITensor data layout %s not supported by this kernel", string_from_data_layout(tensor_dl).c_str());
+ return arm_compute::Status{};
+}
+/** Return an error if the data layout of the passed tensor does not match any of the data layouts provided.
+ *
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor Tensor to validate.
+ * @param[in] dl First data layout allowed.
+ * @param[in] dls (Optional) Further allowed data layouts.
+ *
+ * @return Status
+ */
+template <typename T, typename... Ts>
+inline arm_compute::Status error_on_data_layout_not_in(const char *function, const char *file, const int line,
+ const ITensor *tensor, T &&dl, Ts &&... dls)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
+ ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_layout_not_in(function, file, line, tensor->info(), std::forward<T>(dl), std::forward<Ts>(dls)...));
+ return arm_compute::Status{};
+}
+#define ARM_COMPUTE_ERROR_ON_DATA_LAYOUT_NOT_IN(t, ...) \
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_layout_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
+#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(t, ...) \
+ ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_layout_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
+
/** Return an error if the data type or the number of channels of the passed tensor info does not match any of the data types and number of channels provided.
*
* @param[in] function Function in which the error occurred.