path: root/src/core/NEON/kernels/convolution
authorSang-Hoon Park <sang-hoon.park@arm.com>2020-10-19 16:00:11 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2020-10-20 10:27:40 +0000
commit68dd25fbe6e4d3c3513fa5993863419769aa08fc (patch)
treeb918be923f9e4550c306d7f44d168ab938a71fc8 /src/core/NEON/kernels/convolution
parentf0a4e609d98f111b6a7d4a2b578d1b7cba64b805 (diff)
downloadComputeLibrary-68dd25fbe6e4d3c3513fa5993863419769aa08fc.tar.gz
COMPMID-3637: Move utility headers from arm_compute to src
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: If9d6fa8c900b68c4b6fd373f2fc1f9abb83ea917
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4145
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/convolution')
-rw-r--r--  src/core/NEON/kernels/convolution/common/activation.hpp                        37
-rw-r--r--  src/core/NEON/kernels/convolution/common/alloc.hpp                             31
-rw-r--r--  src/core/NEON/kernels/convolution/common/arm.hpp                               39
-rw-r--r--  src/core/NEON/kernels/convolution/common/convolution.hpp                       29
-rw-r--r--  src/core/NEON/kernels/convolution/common/padding.hpp                           91
-rw-r--r--  src/core/NEON/kernels/convolution/common/perf.h                                32
-rw-r--r--  src/core/NEON/kernels/convolution/common/qasymm8.hpp                           54
-rw-r--r--  src/core/NEON/kernels/convolution/common/qsymm8.hpp                            76
-rw-r--r--  src/core/NEON/kernels/convolution/common/shims.hpp                            749
-rw-r--r--  src/core/NEON/kernels/convolution/common/tensor.hpp                           178
-rw-r--r--  src/core/NEON/kernels/convolution/common/tensor_utils.hpp                      46
-rw-r--r--  src/core/NEON/kernels/convolution/common/utils.hpp                             60
-rw-r--r--  src/core/NEON/kernels/convolution/depthwise/depthwise.hpp                     551
-rw-r--r--  src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp             156
-rw-r--r--  src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp           291
-rw-r--r--  src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp    88
16 files changed, 2508 insertions, 0 deletions
diff --git a/src/core/NEON/kernels/convolution/common/activation.hpp b/src/core/NEON/kernels/convolution/common/activation.hpp
new file mode 100644
index 0000000000..0c9b7c1368
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/activation.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace neon_convolution_kernels
+{
+
+enum class ActivationFunction
+{
+ None,
+ ReLU,
+ ReLU6,
+};
+
+} // namespace neon_convolution_kernels
diff --git a/src/core/NEON/kernels/convolution/common/alloc.hpp b/src/core/NEON/kernels/convolution/common/alloc.hpp
new file mode 100644
index 0000000000..7be3cdaaf5
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/alloc.hpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <cstdlib> // for malloc / aligned_alloc
+
+#ifdef ALLOC_ALIGN
+#define ALLOCATE(x) aligned_alloc(ALLOC_ALIGN, x)
+#else
+#define ALLOCATE(x) malloc(x)
+#endif
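
A note on ALLOCATE: C11/C++17 aligned_alloc requires the requested size to be a multiple of the alignment, so callers building with ALLOC_ALIGN defined are expected to round sizes up first. A minimal sketch, assuming a hypothetical ALLOC_ALIGN of 64 (the value is a build-time choice, not part of this patch):

    // Illustrative only: ALLOC_ALIGN=64 is a hypothetical build setting.
    #define ALLOC_ALIGN 64
    #include "alloc.hpp"
    #include <cstddef>

    void *make_buffer(std::size_t n_bytes)
    {
      // aligned_alloc requires size to be a multiple of the alignment,
      // so round the request up before calling ALLOCATE.
      const std::size_t padded =
          (n_bytes + ALLOC_ALIGN - 1) / ALLOC_ALIGN * ALLOC_ALIGN;
      return ALLOCATE(padded); // release with free()
    }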
diff --git a/src/core/NEON/kernels/convolution/common/arm.hpp b/src/core/NEON/kernels/convolution/common/arm.hpp
new file mode 100644
index 0000000000..b19bf98252
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/arm.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/** Sets the macro __arm_any__ if compiling for AArch32 or AArch64.
+ * Includes `arm_neon.h` if compiling for either architecture.
+ */
+
+#ifdef __arm__
+#define __arm_any__
+#endif // __arm__
+
+#ifdef __aarch64__
+#define __arm_any__
+#endif // __aarch64__
+
+#ifdef __arm_any__
+#include <arm_neon.h>
+#endif // __arm_any__
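
The intended usage pattern, as a minimal sketch (the reduction below is illustrative, not taken from the library): guard NEON-specific code behind __arm_any__ so the same source still builds on non-Arm hosts.

    #include "arm.hpp"

    // Horizontal sum of four floats; the NEON path compiles only when
    // arm.hpp has defined __arm_any__ (AArch32 or AArch64).
    float sum4(const float *v)
    {
    #ifdef __arm_any__
      const float32x4_t x = vld1q_f32(v);
      float32x2_t s = vadd_f32(vget_low_f32(x), vget_high_f32(x));
      s = vpadd_f32(s, s);
      return vget_lane_f32(s, 0);
    #else
      return v[0] + v[1] + v[2] + v[3]; // portable fallback
    #endif
    }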
diff --git a/src/core/NEON/kernels/convolution/common/convolution.hpp b/src/core/NEON/kernels/convolution/common/convolution.hpp
new file mode 100644
index 0000000000..b1413527c3
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/convolution.hpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+enum PaddingType {
+ PADDING_SAME, PADDING_VALID
+};
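
The header defines only the enum. Assuming the conventional TensorFlow-style semantics for SAME and VALID padding (an assumption; this patch does not state them), output sizes follow the usual formulas:

    #include "convolution.hpp"

    // Assumed SAME/VALID semantics, for orientation only: SAME keeps
    // ceil(in / stride) positions, VALID keeps only windows that fit
    // entirely inside the input.
    constexpr int out_size(int in, int kernel, int stride, PaddingType pad)
    {
      return (pad == PADDING_SAME)
                 ? (in + stride - 1) / stride
                 : (in - kernel + stride) / stride;
    }
    // e.g. in=5, kernel=3, stride=1: SAME -> 5, VALID -> 3.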
diff --git a/src/core/NEON/kernels/convolution/common/padding.hpp b/src/core/NEON/kernels/convolution/common/padding.hpp
new file mode 100644
index 0000000000..b6f95872c0
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/padding.hpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <cstddef>
+
+// Utilities for copying tensor tiles and adding/removing padding.
+namespace padding
+{
+
+/** Copy a tile and apply padding to the output copy.
+ */
+template <typename T>
+void copy_and_pad_tile(
+ unsigned int tile_rows,
+ unsigned int tile_cols,
+ unsigned int n_channels,
+ const T *inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ T* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride,
+ unsigned int pad_top,
+ unsigned int pad_left,
+ unsigned int pad_bottom,
+ unsigned int pad_right,
+ T pad_value=static_cast<T>(0)
+);
+
+/** Copy a tile and remove padding elements in the output.
+ */
+template <unsigned int TileRows, unsigned int TileCols>
+class CopyCropped
+{
+ public:
+ static void execute(
+ size_t size, // Amount of data to copy
+ const void *inptr,
+ size_t in_row_stride,
+ size_t in_col_stride,
+ void *outptr,
+ size_t out_row_stride,
+ size_t out_col_stride,
+ unsigned int pad_top,
+ unsigned int pad_left,
+ unsigned int pad_bottom,
+ unsigned int pad_right
+ );
+};
+
+template <typename T>
+void crop_and_copy_tile(
+ unsigned int tile_rows,
+ unsigned int tile_cols,
+ unsigned int n_channels,
+ const T *inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ T *outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride,
+ unsigned int crop_top,
+ unsigned int crop_left,
+ unsigned int crop_bottom,
+ unsigned int crop_right
+);
+
+} // namespace padding
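
The declarations leave the tile-size convention implicit (the definitions are not in this patch). A hedged usage sketch, assuming tile_rows/tile_cols describe the padded output tile:

    #include "padding.hpp"

    void example_pad_tile()
    {
      // Copy a 4x4x8 tile into the interior of a 6x6x8 tile, writing the pad
      // value into a one-element border (sizes and strides illustrative;
      // strides are in elements).
      float in[4 * 4 * 8] = {};
      float out[6 * 6 * 8];
      padding::copy_and_pad_tile<float>(
          6, 6, 8,       // padded output tile: rows, cols, channels
          in, 4 * 8, 8,  // input pointer, row stride, column stride
          out, 6 * 8, 8, // output pointer, row stride, column stride
          1, 1, 1, 1);   // pad top / left / bottom / right
    }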
diff --git a/src/core/NEON/kernels/convolution/common/perf.h b/src/core/NEON/kernels/convolution/common/perf.h
new file mode 100644
index 0000000000..fbae4dcdfa
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/perf.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+/* Prototypes from perf.c */
+
+void start_counter(int fd);
+long long get_counter(int fd);
+long long stop_counter(int fd);
+int open_instruction_counter(void);
+int open_cycle_counter(void);
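
perf.c itself is not part of this diff, so only the call shape can be inferred from the prototypes. A sketch of the implied sequence:

    #include "perf.h"

    void run_workload(); // hypothetical code under test

    long long measure_cycles()
    {
      // Assumed call sequence; perf.c is not shown in this patch.
      const int fd = open_cycle_counter();
      start_counter(fd);
      run_workload();
      return stop_counter(fd);
    }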
diff --git a/src/core/NEON/kernels/convolution/common/qasymm8.hpp b/src/core/NEON/kernels/convolution/common/qasymm8.hpp
new file mode 100644
index 0000000000..88ef7327c0
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/qasymm8.hpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include <cstdint>
+
+namespace qasymm8
+{
+
+struct QAsymm8Params
+{
+ uint8_t quantize(float value) const;
+ float dequantize(uint8_t value) const;
+
+ uint8_t offset;
+ float scale;
+};
+
+struct QAsymm8RescaleParams
+{
+ static QAsymm8RescaleParams make_rescale_params(
+ const QAsymm8Params& weight_quant,
+ const QAsymm8Params& input_quant,
+ const QAsymm8Params& output_quant
+ );
+
+ QAsymm8RescaleParams(int32_t shift, int32_t multiplier, float rescale);
+
+ const int32_t shift, multiplier;
+ const float rescale;
+};
+
+} // namespace qasymm8
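
quantize/dequantize are only declared here. The standard asymmetric uint8 scheme that the offset/scale pair suggests (treat this as an assumption; the definitions live outside this patch) is:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Reference semantics: q = clamp(round(x / scale) + offset, 0, 255),
    // x ~= scale * (q - offset).
    uint8_t quantize_ref(float x, float scale, uint8_t offset)
    {
      const int q = static_cast<int>(std::round(x / scale)) + offset;
      return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

    float dequantize_ref(uint8_t q, float scale, uint8_t offset)
    {
      return scale * (static_cast<int>(q) - static_cast<int>(offset));
    }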
diff --git a/src/core/NEON/kernels/convolution/common/qsymm8.hpp b/src/core/NEON/kernels/convolution/common/qsymm8.hpp
new file mode 100644
index 0000000000..726a02ccfd
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/qsymm8.hpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include <cstdint>
+#include <vector>
+#include "qasymm8.hpp"
+
+
+namespace qsymm8 {
+
+struct QSymm8Params {
+ int8_t quantize(float value) const;
+ float dequantize(int8_t value) const;
+
+ float scale;
+};
+
+struct QSymm8RescaleParams {
+ static QSymm8RescaleParams
+ make_rescale_params(const QSymm8Params &weight_quant,
+ const QSymm8Params &input_quant,
+ const QSymm8Params &output_quant);
+
+ QSymm8RescaleParams(int32_t shift, int32_t multiplier, float rescale);
+
+ const int32_t shift, multiplier;
+ const float rescale;
+};
+
+struct QSymm8PerChannelParams {
+ int8_t quantize(float value, float scale) const;
+ float dequantize(int8_t value, float scale) const;
+
+ std::vector<float> scales;
+};
+
+struct QSymm8PerChannelRescaleParams {
+ static QSymm8PerChannelRescaleParams
+ make_rescale_params(const QSymm8PerChannelParams &weight_quant,
+ const QSymm8PerChannelParams &input_quant,
+ const QSymm8PerChannelParams &output_quant);
+
+ static QSymm8PerChannelRescaleParams
+ make_rescale_params(const QSymm8PerChannelParams &weight_quant,
+ const qasymm8::QAsymm8Params &input_quant,
+ const qasymm8::QAsymm8Params &output_quant);
+
+ QSymm8PerChannelRescaleParams(std::vector<int32_t>& shift, std::vector<int32_t>& multiplier, std::vector<float>& rescale);
+
+ std::vector<int32_t> shifts, multipliers;
+ std::vector<float> rescales;
+};
+
+} // namespace qsymm8
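
The symmetric variants drop the offset and, in the per-channel case, carry one scale per channel. Again an assumed reference, since the definitions are elsewhere:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Assumed symmetric int8 semantics: q = clamp(round(x / scale), -128, 127).
    int8_t quantize_symm_ref(float x, float scale)
    {
      const int q = static_cast<int>(std::round(x / scale));
      return static_cast<int8_t>(std::min(127, std::max(-128, q)));
    }
    // Per-channel use: quantize_symm_ref(x, scales[channel]).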
diff --git a/src/core/NEON/kernels/convolution/common/shims.hpp b/src/core/NEON/kernels/convolution/common/shims.hpp
new file mode 100644
index 0000000000..310bd47b82
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/shims.hpp
@@ -0,0 +1,749 @@
+/*
+ * Copyright (c) 2017 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#ifndef DOXYGEN_SKIP_THIS
+#include <cstdint>
+#endif /* DOXYGEN_SKIP_THIS */
+#include "arm.hpp"
+
+namespace reorder {
+/** Re-order a tensor from NCHW format to NHWC.
+ *
+ * @note The stride parameters are optional and are provided to allow padding in either input or output tensors.
+ *
+ * @param[in] in Input tensor in NCHW format.
+ * @param[out] out Output tensor, to be written in NHWC format.
+ * @param n_batches Number of batches in the tensors.
+ * @param n_channels Number of channels in the tensors.
+ * @param n_rows Height of the tensor.
+ * @param n_cols Width of the tensor.
+ * @param in_batch_stride Stride over batches in the input tensor. If `0` defaults to `n_channels * in_channel_stride`.
+ * @param in_channel_stride Stride over channels in the input tensor. If `0` defaults to `n_rows * in_row_stride`.
+ * @param in_row_stride Stride over rows in the input tensor. If `0` defaults to `n_cols`.
+ * @param out_batch_stride Stride over batches in the output tensor. If `0` defaults to `n_rows * out_row_stride`.
+ * @param out_row_stride Stride over rows in the output tensor. If `0` defaults to `n_cols * out_col_stride`.
+ * @param out_col_stride Stride over columns in the output tensor. If `0` defaults to `n_channels`.
+ */
+template <typename T>
+inline void nchw_to_nhwc(
+ const T* const in,
+ T* const out,
+ const int n_batches,
+ const int n_channels,
+ const int n_rows,
+ const int n_cols,
+ int in_batch_stride=0,
+ int in_channel_stride=0,
+ int in_row_stride=0,
+ int out_batch_stride=0,
+ int out_row_stride=0,
+ int out_col_stride=0
+);
+
+/** Re-order a tensor from NHWC format to NCHW.
+ *
+ * @note The stride parameters are optional and are provided to allow padding in either input or output tensors.
+ *
+ * @param[in] in Input tensor in NHWC format.
+ * @param[out] out Output tensor, to be written in NCHW format.
+ * @param n_batches Number of batches in the tensors.
+ * @param n_rows Height of the tensor.
+ * @param n_cols Width of the tensor.
+ * @param n_channels Number of channels in the tensors.
+ * @param in_batch_stride Stride over batches in the input tensor. If `0` defaults to `n_rows * in_row_stride`.
+ * @param in_row_stride Stride over rows in the input tensor. If `0` defaults to `n_cols * in_col_stride`.
+ * @param in_col_stride Stride over columns in the input tensor. If `0` defaults to `n_channels`.
+ * @param out_batch_stride Stride over batches in the output tensor. If `0` defaults to `n_channels * out_channel_stride`.
+ * @param out_channel_stride Stride over channels in the output tensor. If `0` defaults to `n_rows * out_row_stride`.
+ * @param out_row_stride Stride over rows in the output tensor. If `0` defaults to `n_cols`.
+ */
+template <typename T>
+inline void nhwc_to_nchw(
+ const T* const in, // Input data in NHWC form
+ T* const out, // Output data in NCHW form
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ int in_batch_stride=0,
+ int in_row_stride=0,
+ int in_col_stride=0,
+ int out_batch_stride=0,
+ int out_channel_stride=0,
+ int out_row_stride=0
+);
+
+/** Re-order a weight tensor from [Output feature map x Input feature map x
+ * Height x Width] format to [Height x Width x Input feature map x Output
+ * feature map] format.
+ */
+template <typename T>
+inline void ofm_ifm_h_w_to_h_w_ifm_ofm(
+ const T* const in, // Input in [Output x Input x Height x Width] form
+ T* const out, // Output in [Height x Width x Input x Output] form
+ const int n_output_feature_maps,
+ const int n_input_feature_maps,
+ const int n_rows,
+ const int n_cols,
+ int in_output_feature_map_stride=0,
+ int in_input_feature_map_stride=0,
+ int in_row_stride=0,
+ int out_row_stride=0,
+ int out_col_stride=0,
+ int out_input_feature_map_stride=0
+);
+
+/** Re-order a weight tensor from [Height x Width x Input feature map x Output
+ * feature map] format to [Output feature map x Input feature map x Height x
+ * Width] format.
+ */
+template <typename T>
+inline void h_w_ifm_ofm_to_ofm_ifm_h_w(
+ const T* const in, // Input in [Height x Width x Input x Output] form
+ T* const out, // Output in [Output x Input x Height x Width] form
+ const int n_rows,
+ const int n_cols,
+ const int n_input_feature_maps,
+ const int n_output_feature_maps,
+ int in_row_stride=0,
+ int in_col_stride=0,
+ int in_input_feature_map_stride=0,
+ int out_output_feature_map_stride=0,
+ int out_input_feature_map_stride=0,
+ int out_row_stride=0
+);
+
+/*****************************************************************************/
+/* 32-bit implementation : NCHW -> NHWC
+ */
+template <>
+inline void nchw_to_nhwc(
+ const int32_t* const in,
+ int32_t* const out,
+ const int n_batches,
+ const int n_channels,
+ const int n_rows,
+ const int n_cols,
+ int in_batch_stride,
+ int in_channel_stride,
+ int in_row_stride,
+ int out_batch_stride,
+ int out_row_stride,
+ int out_col_stride
+)
+{
+ typedef int32_t T;
+
+ // Fill in the stride values
+ in_row_stride = (in_row_stride) ? in_row_stride : n_cols;
+ in_channel_stride = (in_channel_stride) ? in_channel_stride
+ : n_rows * in_row_stride;
+ in_batch_stride = (in_batch_stride) ? in_batch_stride
+ : n_channels * in_channel_stride;
+
+ out_col_stride = (out_col_stride) ? out_col_stride : n_channels;
+ out_row_stride = (out_row_stride) ? out_row_stride : n_cols * out_col_stride;
+ out_batch_stride = (out_batch_stride) ? out_batch_stride
+ : n_rows * out_row_stride;
+
+ // Perform the re-ordering
+ for (int n = 0; n < n_batches; n++)
+ {
+ const T* const in_batch = in + n*in_batch_stride;
+ T* const out_batch = out + n*out_batch_stride;
+
+ for (int i = 0; i < n_rows; i++)
+ {
+ const T* const in_row = in_batch + i*in_row_stride;
+ T* const out_row = out_batch + i*out_row_stride;
+
+ int j = 0, j_remaining = n_cols;
+#ifdef __arm_any__
+ for (; j_remaining >= 4; j += 4, j_remaining -= 4)
+ {
+ int c = 0, c_remaining = n_channels;
+ for (; c_remaining >= 4; c += 4, c_remaining -= 4)
+ {
+ // Read 4 channels worth of 4 columns, then zip to produce 4 columns
+ // worth of 4 channels.
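+ // The two zip stages implement a 4x4 transpose: zipping channels 0/2
+ // and 1/3 interleaves pairs, and zipping those results leaves each
+ // output vector holding channels 0..3 of a single column.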
+ int32x4_t channel_pixels[4];
+ channel_pixels[0] = vld1q_s32(in_row + (c + 0)*in_channel_stride + j);
+ channel_pixels[1] = vld1q_s32(in_row + (c + 1)*in_channel_stride + j);
+ channel_pixels[2] = vld1q_s32(in_row + (c + 2)*in_channel_stride + j);
+ channel_pixels[3] = vld1q_s32(in_row + (c + 3)*in_channel_stride + j);
+
+ const auto zip1 = vzipq_s32(channel_pixels[0], channel_pixels[2]);
+ const auto zip2 = vzipq_s32(channel_pixels[1], channel_pixels[3]);
+ const auto out_0 = vzipq_s32(zip1.val[0], zip2.val[0]);
+ const auto out_1 = vzipq_s32(zip1.val[1], zip2.val[1]);
+
+ vst1q_s32(out_row + (j + 0)*out_col_stride + c, out_0.val[0]);
+ vst1q_s32(out_row + (j + 1)*out_col_stride + c, out_0.val[1]);
+ vst1q_s32(out_row + (j + 2)*out_col_stride + c, out_1.val[0]);
+ vst1q_s32(out_row + (j + 3)*out_col_stride + c, out_1.val[1]);
+ }
+ for (; c_remaining; c++, c_remaining--)
+ {
+ for (int _j = 0; _j < 4; _j++)
+ {
+ const T* const in_col = in_row + j + _j;
+ T* const out_col = out_row + (j + _j)*out_col_stride;
+ const T* const in_channel = in_col + c*in_channel_stride;
+ out_col[c] = *(in_channel);
+ }
+ }
+ }
+ for (; j_remaining >= 2; j += 2, j_remaining -= 2)
+ {
+ int c = 0, c_remaining = n_channels;
+ for (; c_remaining >= 2; c += 2, c_remaining -= 2)
+ {
+ // Read 2 channels worth of 2 columns, then zip to produce 2 columns
+ // worth of 2 channels.
+ int32x2_t channel_pixels[2];
+ channel_pixels[0] = vld1_s32(in_row + (c + 0)*in_channel_stride + j);
+ channel_pixels[1] = vld1_s32(in_row + (c + 1)*in_channel_stride + j);
+
+ const auto output = vzip_s32(channel_pixels[0], channel_pixels[1]);
+
+ vst1_s32(out_row + (j + 0)*out_col_stride + c, output.val[0]);
+ vst1_s32(out_row + (j + 1)*out_col_stride + c, output.val[1]);
+ }
+ for (; c_remaining; c++, c_remaining--)
+ {
+ for (int _j = 0; _j < 2; _j++)
+ {
+ const T* const in_col = in_row + j + _j;
+ T* const out_col = out_row + (j + _j)*out_col_stride;
+ const T* const in_channel = in_col + c*in_channel_stride;
+ out_col[c] = *(in_channel);
+ }
+ }
+ }
+#endif // __arm_any__
+ for (; j_remaining; j++, j_remaining--)
+ {
+ const T* const in_col = in_row + j;
+ T* const out_col = out_row + j*out_col_stride;
+
+ for (int c = 0; c < n_channels; c++)
+ {
+ const T* const in_channel = in_col + c*in_channel_stride;
+ out_col[c] = *(in_channel);
+ }
+ }
+ }
+ }
+}
+
+template <>
+inline void nchw_to_nhwc(
+ const uint32_t* const in,
+ uint32_t* const out,
+ const int n_batches,
+ const int n_channels,
+ const int n_rows,
+ const int n_cols,
+ int in_batch_stride,
+ int in_channel_stride,
+ int in_row_stride,
+ int out_batch_stride,
+ int out_row_stride,
+ int out_col_stride
+)
+{
+ nchw_to_nhwc(
+ reinterpret_cast<const int32_t*>(in),
+ reinterpret_cast<int32_t*>(out),
+ n_batches, n_channels, n_rows, n_cols,
+ in_batch_stride, in_channel_stride, in_row_stride,
+ out_batch_stride, out_row_stride, out_col_stride
+ );
+}
+
+template <>
+inline void nchw_to_nhwc(
+ const float* const in,
+ float* const out,
+ const int n_batches,
+ const int n_channels,
+ const int n_rows,
+ const int n_cols,
+ int in_batch_stride,
+ int in_channel_stride,
+ int in_row_stride,
+ int out_batch_stride,
+ int out_row_stride,
+ int out_col_stride
+)
+{
+ nchw_to_nhwc(
+ reinterpret_cast<const int32_t*>(in),
+ reinterpret_cast<int32_t*>(out),
+ n_batches, n_channels, n_rows, n_cols,
+ in_batch_stride, in_channel_stride, in_row_stride,
+ out_batch_stride, out_row_stride, out_col_stride
+ );
+}
+
+/*****************************************************************************/
+/* Generic implementation : NCHW -> NHWC
+ */
+template <typename T>
+inline void nchw_to_nhwc(
+ const T* const in,
+ T* const out,
+ const int n_batches,
+ const int n_channels,
+ const int n_rows,
+ const int n_cols,
+ int in_batch_stride,
+ int in_channel_stride,
+ int in_row_stride,
+ int out_batch_stride,
+ int out_row_stride,
+ int out_col_stride
+)
+{
+ // Fill in the stride values
+ in_row_stride = (in_row_stride) ? in_row_stride : n_cols;
+ in_channel_stride = (in_channel_stride) ? in_channel_stride
+ : n_rows * in_row_stride;
+ in_batch_stride = (in_batch_stride) ? in_batch_stride
+ : n_channels * in_channel_stride;
+
+ out_col_stride = (out_col_stride) ? out_col_stride : n_channels;
+ out_row_stride = (out_row_stride) ? out_row_stride : n_cols * out_col_stride;
+ out_batch_stride = (out_batch_stride) ? out_batch_stride
+ : n_rows * out_row_stride;
+
+ // Perform the re-ordering
+ for (int n = 0; n < n_batches; n++)
+ {
+ const T* const in_batch = in + n*in_batch_stride;
+ T* const out_batch = out + n*out_batch_stride;
+
+ for (int i = 0; i < n_rows; i++)
+ {
+ const T* const in_row = in_batch + i*in_row_stride;
+ T* const out_row = out_batch + i*out_row_stride;
+
+ for (int j = 0; j < n_cols; j++)
+ {
+ const T* const in_col = in_row + j;
+ T* const out_col = out_row + j*out_col_stride;
+
+ for (int c = 0; c < n_channels; c++)
+ {
+ const T* const in_channel = in_col + c*in_channel_stride;
+ out_col[c] = *(in_channel);
+ }
+ }
+ }
+ }
+}
+
+/*****************************************************************************/
+/* 32-bit implementation : NHWC -> NCHW
+ */
+template <>
+inline void nhwc_to_nchw(
+ const int32_t* const in, // Input data in NHWC form
+ int32_t* const out, // Output data in NCHW form
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ int in_batch_stride,
+ int in_row_stride,
+ int in_col_stride,
+ int out_batch_stride,
+ int out_channel_stride,
+ int out_row_stride
+)
+{
+ typedef int32_t T;
+
+ // Fill in stride values
+ in_col_stride = (in_col_stride) ? in_col_stride : n_channels;
+ in_row_stride = (in_row_stride) ? in_row_stride : n_cols * in_col_stride;
+ in_batch_stride = (in_batch_stride) ? in_batch_stride
+ : n_rows * in_row_stride;
+
+ out_row_stride = (out_row_stride) ? out_row_stride : n_cols;
+ out_channel_stride = (out_channel_stride) ? out_channel_stride
+ : n_rows * out_row_stride;
+ out_batch_stride = (out_batch_stride) ? out_batch_stride
+ : n_channels * out_channel_stride;
+
+ // Perform the re-ordering
+ // For every batch
+ for (int n = 0; n < n_batches; n++)
+ {
+ const T* const in_batch = in + n*in_batch_stride;
+ T* const out_batch = out + n*out_batch_stride;
+
+ // For every row
+ for (int i = 0; i < n_rows; i++)
+ {
+ const T* const in_i = in_batch + i*in_row_stride;
+ T* const out_i = out_batch + i*out_row_stride;
+
+ // For every column, beginning with chunks of 4
+ int j = 0, j_remaining = n_cols;
+#ifdef __arm_any__
+ for (; j_remaining >= 4; j += 4, j_remaining -= 4)
+ {
+ // For every channel, beginning with chunks of 4
+ int c = 0, c_remaining = n_channels;
+ for (; c_remaining >= 4; c += 4, c_remaining -= 4)
+ {
+ // Read 4 columns worth of 4 channels then zip to produce 4 channels
+ // worth of 4 columns.
+ int32x4_t pixel_channels[4];
+ pixel_channels[0] = vld1q_s32(in_i + (j + 0)*in_col_stride + c);
+ pixel_channels[1] = vld1q_s32(in_i + (j + 1)*in_col_stride + c);
+ pixel_channels[2] = vld1q_s32(in_i + (j + 2)*in_col_stride + c);
+ pixel_channels[3] = vld1q_s32(in_i + (j + 3)*in_col_stride + c);
+
+ const auto zip1 = vzipq_s32(pixel_channels[0], pixel_channels[2]);
+ const auto zip2 = vzipq_s32(pixel_channels[1], pixel_channels[3]);
+ const auto out_0 = vzipq_s32(zip1.val[0], zip2.val[0]);
+ const auto out_1 = vzipq_s32(zip1.val[1], zip2.val[1]);
+
+ vst1q_s32(out_i + j + (c + 0)*out_channel_stride, out_0.val[0]);
+ vst1q_s32(out_i + j + (c + 1)*out_channel_stride, out_0.val[1]);
+ vst1q_s32(out_i + j + (c + 2)*out_channel_stride, out_1.val[0]);
+ vst1q_s32(out_i + j + (c + 3)*out_channel_stride, out_1.val[1]);
+ }
+ for (; c_remaining; c++, c_remaining--)
+ {
+ for (int _j = 0; _j < 4; _j++)
+ {
+ const T* const in_j = in_i + (j + _j)*in_col_stride;
+ T* const out_j = out_i + (j + _j);
+
+ const T* const in_channel = in_j + c;
+ T* const out_channel = out_j + c*out_channel_stride;
+ *(out_channel) = *(in_channel);
+ }
+ }
+ }
+ for (; j_remaining >= 2; j += 2, j_remaining -= 2)
+ {
+ int c = 0, c_remaining = n_channels;
+ for (; c_remaining >= 2; c += 2, c_remaining -= 2)
+ {
+ // Read 2 columns worth of 2 channels then zip to produce 2 channels
+ // worth of 2 columns.
+ int32x2_t pixel_channels[2];
+ pixel_channels[0] = vld1_s32(in_i + (j + 0)*in_col_stride + c);
+ pixel_channels[1] = vld1_s32(in_i + (j + 1)*in_col_stride + c);
+
+ const auto output = vzip_s32(pixel_channels[0], pixel_channels[1]);
+
+ vst1_s32(out_i + j + (c + 0)*out_channel_stride, output.val[0]);
+ vst1_s32(out_i + j + (c + 1)*out_channel_stride, output.val[1]);
+ }
+ for (; c_remaining; c++, c_remaining--)
+ {
+ for (int _j = 0; _j < 2; _j++)
+ {
+ const T* const in_j = in_i + (j + _j)*in_col_stride;
+ T* const out_j = out_i + (j + _j);
+
+ const T* const in_channel = in_j + c;
+ T* const out_channel = out_j + c*out_channel_stride;
+ *(out_channel) = *(in_channel);
+ }
+ }
+ }
+#endif // __arm_any__
+ for (; j_remaining; j++, j_remaining--)
+ {
+ const T* const in_j = in_i + j*in_col_stride;
+ T* const out_j = out_i + j;
+
+ // For every channel
+ for (int c = 0; c < n_channels; c++)
+ {
+ const T* const in_channel = in_j + c;
+ T* const out_channel = out_j + c*out_channel_stride;
+ *(out_channel) = *(in_channel);
+ }
+ }
+ }
+ }
+}
+
+template <>
+inline void nhwc_to_nchw(
+ const uint32_t* const in, // Input data in NHWC form
+ uint32_t* const out, // Output data in NCHW form
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ int in_batch_stride,
+ int in_row_stride,
+ int in_col_stride,
+ int out_batch_stride,
+ int out_channel_stride,
+ int out_row_stride
+)
+{
+ // Redirect to generic 32-bit implementation
+ nhwc_to_nchw(
+ reinterpret_cast<const int32_t*>(in),
+ reinterpret_cast<int32_t*>(out),
+ n_batches, n_rows, n_cols, n_channels,
+ in_batch_stride, in_row_stride, in_col_stride,
+ out_batch_stride, out_channel_stride, out_row_stride
+ );
+}
+
+template <>
+inline void nhwc_to_nchw(
+ const float* const in, // Input data in NHWC form
+ float* const out, // Output data in NCHW form
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ int in_batch_stride,
+ int in_row_stride,
+ int in_col_stride,
+ int out_batch_stride,
+ int out_channel_stride,
+ int out_row_stride
+)
+{
+ // Redirect to generic 32-bit implementation
+ nhwc_to_nchw(
+ reinterpret_cast<const int32_t*>(in),
+ reinterpret_cast<int32_t*>(out),
+ n_batches, n_rows, n_cols, n_channels,
+ in_batch_stride, in_row_stride, in_col_stride,
+ out_batch_stride, out_channel_stride, out_row_stride
+ );
+}
+
+/*****************************************************************************/
+/* Generic implementation : NHWC -> NCHW
+ */
+template <typename T>
+inline void nhwc_to_nchw(
+ const T* const in, // Input data in NHWC form
+ T* const out, // Output data in NCHW form
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ int in_batch_stride,
+ int in_row_stride,
+ int in_col_stride,
+ int out_batch_stride,
+ int out_channel_stride,
+ int out_row_stride
+)
+{
+ // Fill in stride values
+ in_col_stride = (in_col_stride) ? in_col_stride : n_channels;
+ in_row_stride = (in_row_stride) ? in_row_stride : n_cols * in_col_stride;
+ in_batch_stride = (in_batch_stride) ? in_batch_stride
+ : n_rows * in_row_stride;
+
+ out_row_stride = (out_row_stride) ? out_row_stride : n_cols;
+ out_channel_stride = (out_channel_stride) ? out_channel_stride
+ : n_rows * out_row_stride;
+ out_batch_stride = (out_batch_stride) ? out_batch_stride
+ : n_channels * out_channel_stride;
+
+ // Perform the re-ordering
+ // For every batch
+ for (int n = 0; n < n_batches; n++)
+ {
+ const T* const in_batch = in + n*in_batch_stride;
+ T* const out_batch = out + n*out_batch_stride;
+
+ // For every row
+ for (int i = 0; i < n_rows; i++)
+ {
+ const T* const in_i = in_batch + i*in_row_stride;
+ T* const out_i = out_batch + i*out_row_stride;
+
+ // For every column
+ for (int j = 0; j < n_cols; j++)
+ {
+ const T* const in_j = in_i + j*in_col_stride;
+ T* const out_j = out_i + j;
+
+ // For every channel
+ for (int c = 0; c < n_channels; c++)
+ {
+ const T* const in_channel = in_j + c;
+ T* const out_channel = out_j + c*out_channel_stride;
+ *(out_channel) = *(in_channel);
+ }
+ }
+ }
+ }
+}
+
+/*****************************************************************************/
+/* Generic weight re-order implementation.
+ */
+template <typename T>
+inline void ofm_ifm_h_w_to_h_w_ifm_ofm(
+ const T* const in, // Input in [Output x Input x Height x Width] form
+ T* const out, // Output in [Height x Width x Input x Output] form
+ const int n_output_feature_maps,
+ const int n_input_feature_maps,
+ const int n_rows,
+ const int n_cols,
+ int in_output_feature_map_stride,
+ int in_input_feature_map_stride,
+ int in_row_stride,
+ int out_row_stride,
+ int out_col_stride,
+ int out_input_feature_map_stride
+)
+{
+ // Fill in stride values
+ in_row_stride = (in_row_stride)
+ ? in_row_stride
+ : n_cols;
+ in_input_feature_map_stride = (in_input_feature_map_stride)
+ ? in_input_feature_map_stride
+ : n_rows * in_row_stride;
+ in_output_feature_map_stride = (in_output_feature_map_stride)
+ ? in_output_feature_map_stride
+ : n_input_feature_maps * in_input_feature_map_stride;
+
+ out_input_feature_map_stride = (out_input_feature_map_stride)
+ ? out_input_feature_map_stride
+ : n_output_feature_maps;
+ out_col_stride = (out_col_stride)
+ ? out_col_stride
+ : n_input_feature_maps * out_input_feature_map_stride;
+ out_row_stride = (out_row_stride)
+ ? out_row_stride
+ : n_cols * out_col_stride;
+
+ // Perform the re-ordering
+ for (int i = 0; i < n_rows; i++)
+ {
+ const T* const in_row = in + i * in_row_stride;
+ T* out_row = out + i * out_row_stride;
+
+ for (int j = 0; j < n_cols; j++)
+ {
+ const T* const in_col = in_row + j;
+ T* const out_col = out_row + j * out_col_stride;
+
+ for (int ifm = 0; ifm < n_input_feature_maps; ifm++)
+ {
+ const T* const in_ifm = in_col + ifm * in_input_feature_map_stride;
+ T* const out_ifm = out_col + ifm * out_input_feature_map_stride;
+
+ for (int ofm = 0; ofm < n_output_feature_maps; ofm++)
+ {
+ const T* const in_ofm = in_ifm + ofm * in_output_feature_map_stride;
+ T* const out_ofm = out_ifm + ofm;
+ *(out_ofm) = *(in_ofm);
+ }
+ }
+ }
+ }
+}
+
+/*****************************************************************************/
+/* Generic weight re-order implementation.
+ */
+template <typename T>
+inline void h_w_ifm_ofm_to_ofm_ifm_h_w(
+ const T* const in, // Input in [Height x Width x Input x Output] form
+ T* const out, // Output in [Output x Input x Height x Width] form
+ const int n_rows,
+ const int n_cols,
+ const int n_input_feature_maps,
+ const int n_output_feature_maps,
+ int in_row_stride,
+ int in_col_stride,
+ int in_input_feature_map_stride,
+ int out_output_feature_map_stride,
+ int out_input_feature_map_stride,
+ int out_row_stride
+)
+{
+ // Fill in the stride values
+ in_input_feature_map_stride = (in_input_feature_map_stride)
+ ? in_input_feature_map_stride
+ : n_output_feature_maps;
+ in_col_stride = (in_col_stride)
+ ? in_col_stride
+ : n_input_feature_maps * in_input_feature_map_stride;
+ in_row_stride = (in_row_stride)
+ ? in_row_stride
+ : n_cols * in_col_stride;
+
+ out_row_stride = (out_row_stride)
+ ? out_row_stride
+ : n_cols;
+ out_input_feature_map_stride = (out_input_feature_map_stride)
+ ? out_input_feature_map_stride
+ : n_rows * out_row_stride;
+ out_output_feature_map_stride = (out_output_feature_map_stride)
+ ? out_output_feature_map_stride
+ : n_input_feature_maps * out_input_feature_map_stride;
+
+ // Perform the re-ordering
+ for (int i = 0; i < n_rows; i++)
+ {
+ const T* const in_row = in + i * in_row_stride;
+ T* const out_row = out + i * out_row_stride;
+
+ for (int j = 0; j < n_cols; j++)
+ {
+ const T* const in_col = in_row + j * in_col_stride;
+ T* const out_col = out_row + j;
+
+ for (int ifm = 0; ifm < n_input_feature_maps; ifm++)
+ {
+ const T* const in_ifm = in_col + ifm * in_input_feature_map_stride;
+ T* const out_ifm = out_col + ifm * out_input_feature_map_stride;
+
+ for (int ofm = 0; ofm < n_output_feature_maps; ofm++)
+ {
+ const T* const in_ofm = in_ifm + ofm;
+ T* const out_ofm = out_ifm + ofm * out_output_feature_map_stride;
+ *(out_ofm) = *(in_ofm);
+ }
+ }
+ }
+ }
+}
+
+} // namespace reorder
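
A minimal usage sketch of the reorder shims: passing 0 for every stride (the default) makes them assume densely packed tensors and derive the strides themselves.

    #include <vector>
    #include "shims.hpp"

    void example_reorder()
    {
      // Packed NCHW -> packed NHWC; all strides left at their 0 defaults.
      const int n = 1, c = 8, h = 16, w = 16;
      std::vector<float> nchw(n * c * h * w), nhwc(n * h * w * c);
      reorder::nchw_to_nhwc(nchw.data(), nhwc.data(), n, c, h, w);
    }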
diff --git a/src/core/NEON/kernels/convolution/common/tensor.hpp b/src/core/NEON/kernels/convolution/common/tensor.hpp
new file mode 100644
index 0000000000..7738cdb349
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/tensor.hpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include <cstdlib>
+#include <random>
+
+#include "alloc.hpp"
+
+enum TensorOrder
+{
+ NHWC, ///< [Batch x Height x Width x Channels]
+ NCHW, ///< [Batch x Channels x Height x Width]
+};
+
+struct Tensor4DShape
+{
+ int n_batches, n_rows, n_cols, n_channels;
+ TensorOrder ordering;
+
+ // Create a new tensor with the default (NHWC) ordering
+ inline Tensor4DShape(
+ const int n_batches,
+ const int n_rows,
+ const int n_cols,
+ const int n_channels,
+ const TensorOrder ordering=NHWC
+ ) : n_batches(n_batches),
+ n_rows(n_rows),
+ n_cols(n_cols),
+ n_channels(n_channels),
+ ordering(ordering)
+ {
+ }
+
+ inline int index(const int n, const int i, const int j, const int c) const
+ {
+ if (this->ordering == NHWC)
+ {
+ return ((n*this->n_rows + i)*this->n_cols + j)*this->n_channels + c;
+ }
+ else // NCHW
+ {
+ return ((n*this->n_channels + c)*this->n_rows + i)*this->n_cols + j;
+ }
+ }
+
+ inline int size() const
+ {
+ return n_batches * n_rows * n_cols * n_channels;
+ }
+
+ inline bool TestEq(const Tensor4DShape& other) const
+ {
+ return (n_batches == other.n_batches &&
+ n_rows == other.n_rows &&
+ n_cols == other.n_cols &&
+ n_channels == other.n_channels);
+ }
+};
+
+
+enum WeightOrder
+{
+ HWIO, ///< [Height x Width x Input channels x Output channels]
+ OIHW, ///< [Output channels x Input channels x Height x Width]
+};
+
+struct KernelShape
+{
+ int n_output_channels, n_rows, n_cols, n_input_channels;
+ WeightOrder ordering;
+
+ inline KernelShape(
+ const int n_output_channels,
+ const int n_rows,
+ const int n_cols,
+ const int n_input_channels,
+ const WeightOrder ordering=HWIO
+ ) : n_output_channels(n_output_channels),
+ n_rows(n_rows),
+ n_cols(n_cols),
+ n_input_channels(n_input_channels),
+ ordering(ordering)
+ {
+ }
+
+ inline int index(int oc, int i, int j, int ic) const
+ {
+ if (this->ordering == HWIO)
+ {
+ return ((i*this->n_cols + j)*this->n_input_channels + ic)*this->n_output_channels + oc;
+ }
+ else // OIHW
+ {
+ return ((oc*this->n_input_channels + ic)*this->n_rows + i)*this->n_cols + j;
+ }
+ }
+
+ inline int size(void) const
+ {
+ return n_output_channels * n_rows * n_cols * n_input_channels;
+ }
+};
+
+
+template <typename ShapeT, typename T>
+class Tensor4D final
+{
+ public:
+ Tensor4D(ShapeT shape) :
+ shape(shape),
+ _data(reinterpret_cast<T*>(ALLOCATE(size_bytes())))
+ {
+ Clear();
+ }
+
+ Tensor4D(const Tensor4D<ShapeT, T>&) = delete;
+ Tensor4D operator=(const Tensor4D<ShapeT, T>&) = delete;
+
+ ~Tensor4D() {
+ free(_data);
+ }
+
+ inline T* ptr() const {
+ return _data;
+ }
+
+ inline size_t size_bytes() const {
+ return shape.size() * sizeof(T);
+ }
+
+ /* Extract an element of the tensor.
+ *
+ * If the shape is a Tensor4DShape then the index is given as batch, row,
+ * column and channel. If the shape is a KernelShape then the index is
+ * given as output channel, row, column and input channel.
+ */
+ inline T& element(const int a, const int b, const int c, const int d) const
+ {
+ return _data[shape.index(a, b, c, d)];
+ }
+
+ inline void Clear() {
+ Fill(static_cast<T>(0));
+ }
+
+ inline void Fill(T val) {
+ for (int i = 0; i < shape.size(); i++)
+ _data[i] = val;
+ }
+
+ const ShapeT shape;
+
+ private:
+ T* const _data;
+};
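
A short usage sketch of the container (shape and values illustrative):

    #include "tensor.hpp"

    void example_tensor()
    {
      // A zero-initialised 1x8x8x16 NHWC tensor.
      Tensor4DShape shape(1, 8, 8, 16);             // batches, rows, cols, channels
      Tensor4D<Tensor4DShape, float> tensor(shape); // Clear()ed on construction
      tensor.element(0, 3, 4, 5) = 1.0f;            // batch, row, col, channel
      const size_t n_bytes = tensor.size_bytes();   // 1*8*8*16 * sizeof(float)
      (void)n_bytes;
    }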
diff --git a/src/core/NEON/kernels/convolution/common/tensor_utils.hpp b/src/core/NEON/kernels/convolution/common/tensor_utils.hpp
new file mode 100644
index 0000000000..82619f4799
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/tensor_utils.hpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include "tensor.hpp"
+
+// Methods to print tensors and weights
+void PrintTensor(const Tensor4D<Tensor4DShape, float>& tensor);
+void PrintWeights(const Tensor4D<KernelShape, float>& weights);
+
+// Test the equivalence of two tensors
+// Counts the number of elements for which |a - b|/|a| > max_err
+bool CmpTensors(
+ const Tensor4D<Tensor4DShape, float>& a,
+ const Tensor4D<Tensor4DShape, float>& b,
+ const float max_err=0.0f
+);
+
+// Fill the tensor with a test pattern
+void TestPattern(Tensor4D<Tensor4DShape, float>& tensor);
+void TestPattern(Tensor4D<KernelShape, float>& weights);
+
+// Fill the tensor with random values
+void Randomise(Tensor4D<Tensor4DShape, float>& tensor, const int seed=0);
+void Randomise(Tensor4D<KernelShape, float>& weights, const int seed=0);
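
The comparison criterion above, written out in isolation (note that, as stated, an exact-zero reference element would divide by zero; the real implementation may guard this):

    #include <cmath>

    // An element "fails" when its relative error exceeds max_err.
    inline bool element_fails(float a, float b, float max_err)
    {
      return std::fabs(a - b) / std::fabs(a) > max_err;
    }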
diff --git a/src/core/NEON/kernels/convolution/common/utils.hpp b/src/core/NEON/kernels/convolution/common/utils.hpp
new file mode 100644
index 0000000000..b7a9517c65
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/common/utils.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017-2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <limits>
+
+void PrintMatrix(const float *const m, const int M, const int N, const int row_stride);
+
+constexpr inline int iceildiv(const int a, const int b)
+{
+ return (a + b - 1) / b;
+}
+
+template <typename T>
+inline T roundup(const T a, const T b)
+{
+ return b * iceildiv(a, b);
+}
+
+template<typename T>
+struct TypeBounds
+{
+ static constexpr T lower() noexcept { return std::numeric_limits<T>::has_infinity
+ ? -std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::lowest(); }
+ static constexpr T upper() noexcept { return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max(); }
+};
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template<>
+struct TypeBounds<__fp16>
+{
+ static constexpr __fp16 lower() noexcept { return -std::numeric_limits<float>::infinity(); }
+ static constexpr __fp16 upper() noexcept { return std::numeric_limits<float>::infinity(); }
+};
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
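
Worked examples of the integer helpers and bounds; the values follow directly from the definitions above.

    #include "utils.hpp"

    // iceildiv rounds the quotient up; roundup snaps to the next multiple.
    static_assert(iceildiv(10, 4) == 3, "ceil(10 / 4) == 3");
    // roundup(10, 4) == 12 (roundup is not constexpr, so no static_assert).
    // TypeBounds<float>::lower() is -infinity; TypeBounds<int>::lower() is
    // INT_MIN, because int has no infinity.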
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise.hpp
new file mode 100644
index 0000000000..70d6689731
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise.hpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2018-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <arm_neon.h>
+#include "activation.hpp"
+#include "padding.hpp"
+
+namespace depthwise
+{
+
+namespace nck = neon_convolution_kernels;
+
+class IDepthwiseConvolution
+{
+ public:
+ virtual ~IDepthwiseConvolution() = default;
+
+ virtual int output_size(
+ int dim_size,
+ unsigned int padding_before,
+ unsigned int padding_after
+ ) const = 0;
+
+ /* Set input tensor and stride. */
+ virtual void set_input(const void *inptr) = 0;
+ virtual void set_input(const void *inptr, int column_stride) = 0;
+ virtual void set_input(const void *inptr, int row_stride, int column_stride) = 0;
+ virtual void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) = 0;
+
+ /* Set output tensor and stride. */
+ virtual void set_output(void *outptr) = 0;
+ virtual void set_output(void *outptr, int column_stride) = 0;
+ virtual void set_output(void *outptr, int row_stride, int column_stride) = 0;
+ virtual void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) = 0;
+
+ /* Weights and biases are re-ordered to improve memory access patterns. Use
+ * these methods to determine the size of the re-pack buffer and to set the
+ * buffer address (implicitly re-ordering the weights and biases into it).
+ */
+ virtual size_t get_packed_params_size(void) const = 0;
+ virtual void set_packed_params_buffer(void *) = 0;
+
+ virtual void pack_params(const void *weights, const void *biases=nullptr) const = 0;
+ virtual void pack_params(void *buffer, const void *weights, const void *biases=nullptr) const = 0;
+ virtual void pack_params(
+ void *buffer,
+ const void* weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const = 0;
+
+ /* Working space is used to pad tensors on the fly. Before running any
+ * inference, check the amount of space required, allocate it, and provide
+ * a pointer to the convolution engine.
+ */
+ virtual size_t get_working_space_size(unsigned int nthreads=1) const = 0;
+ virtual void set_working_space(void *) = 0;
+
+ virtual unsigned int get_window(void) const = 0;
+ virtual void run(
+ unsigned int start,
+ unsigned int stop,
+ unsigned int threadid=0
+ ) = 0;
+};
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols,
+ typename TIn, typename TBias, typename TOut,
+ typename Derived
+>
+class DepthwiseConvolutionBase : public IDepthwiseConvolution
+{
+ public:
+ // Information about the specific convolution instance
+ using InputType = TIn;
+ using BiasType = TBias;
+ using OutputType = TOut;
+ static constexpr int output_tile_rows = OutputTileRows;
+ static constexpr int output_tile_cols = OutputTileCols;
+ static constexpr int kernel_rows = KernelRows;
+ static constexpr int kernel_cols = KernelCols;
+ static constexpr int stride_rows = StrideRows;
+ static constexpr int stride_cols = StrideCols;
+ static constexpr int inner_tile_rows = stride_rows * (output_tile_rows - 1) + kernel_rows;
+ static constexpr int inner_tile_cols = stride_cols * (output_tile_cols - 1) + kernel_cols;
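+ // e.g. a 2x2 output tile computed with a 3x3 kernel at stride 1 reads a
+ // (1*(2-1)+3) x (1*(2-1)+3) = 4x4 inner tile of the input.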
+
+ /** Create a new depthwise convolution engine.
+ *
+ * @param[in] n_batches Number of batches in the input and output tensors.
+ * @param[in] n_input_rows Number of rows in input tensor.
+ * @param[in] n_input_cols Number of columns in input tensor.
+ * @param[in] n_channels Number of channels in input and output tensors.
+ */
+ DepthwiseConvolutionBase(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ /** Create a new depthwise convolution engine.
+ *
+ * @param[in] n_batches Number of batches in the input and output tensors.
+ * @param[in] n_input_rows Number of rows in input tensor.
+ * @param[in] n_input_cols Number of columns in input tensor.
+ * @param[in] n_channels Number of channels in input and output tensors.
+ * @param[in] n_output_rows Number of rows in output tensor.
+ * @param[in] n_output_cols Number of columns in output tensor.
+ */
+ DepthwiseConvolutionBase(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+  // Cannot copy or move a DepthwiseConvolutionBase.
+ DepthwiseConvolutionBase(DepthwiseConvolutionBase&) = delete;
+ DepthwiseConvolutionBase operator=(DepthwiseConvolutionBase&) = delete;
+
+ /* Set input tensor and stride. */
+ void set_input(const void *inptr) override;
+ void set_input(const void *inptr, int column_stride) override;
+ void set_input(const void *inptr, int row_stride, int column_stride) override;
+ void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) override;
+
+ /* Set output tensor and stride. */
+ void set_output(void *outptr) override;
+ void set_output(void *outptr, int column_stride) override;
+ void set_output(void *outptr, int row_stride, int column_stride) override;
+ void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) override;
+
+  /** Get the number of output rows/columns.
+   *
+   * @param[in] dim_size       Number of elements in the dimension (rows/columns).
+   * @param[in] padding_before Padding applied before this dimension (top/left).
+   * @param[in] padding_after  Padding applied after this dimension (bottom/right).
+   */
+ static int get_output_size(
+ int dim_size, unsigned int padding_before, unsigned int padding_after
+ );
+
+ int output_size(
+ int dim_size, unsigned int padding_before, unsigned int padding_after
+ ) const override;
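+
+  /* For a unit stride this reduces to
+   *   dim_size + padding_before + padding_after - (kernel_size - 1)
+   * (illustrative; the strided case is handled in the implementation).
+   */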
+
+ /* Determine how much memory is required to store the packed weights and
+ * biases.
+ */
+ size_t get_packed_params_size(void) const override;
+
+ /* Set the buffer for the packed weights and biases, and perform the
+ * packing.
+ */
+ void set_packed_params_buffer(void *buffer) override;
+
+ void pack_params(const void *weights, const void *biases=nullptr) const override;
+
+ void pack_params(
+ void *buffer,
+ const void *weights,
+ const void *biases=nullptr
+ ) const override;
+
+ void pack_params(
+ void *buffer,
+ const void *weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const override;
+
+  /** Query the amount of working space required.
+   *
+   * @param[in] n_threads The largest number of threads which will be used
+   *                      to execute the kernel.
+   */
+ size_t get_working_space_size(unsigned int n_threads=1) const override;
+
+ /** Set the working space buffer.
+ */
+ void set_working_space(void *buffer) override;
+
+ /** Get the window of work to be performed by an instance of the operator.
+ */
+ unsigned int get_window(void) const override;
+
+  /** Perform a portion of the work associated with the operator.
+   *
+   * Will perform the window of work described by $[start, stop)$.
+   *
+   * @param[in] start    Start of the window of work to perform.
+   * @param[in] stop     End of the window of work to perform.
+   * @param[in] threadid ID of the thread performing the work.
+   */
+ void run(
+ unsigned int start,
+ unsigned int stop,
+ unsigned int threadid=0
+ ) override;
+
+ protected:
+ /** Get the value to use to pad the tensor.
+ */
+ TIn _input_padding_value(void) const;
+
+ /** Implementation of the parameter packing.
+ */
+ void _pack_params(
+ void *buffer,
+ const void *weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const;
+
+ /** Process a tile-row of the tensors.
+ */
+ void process_tile_row(
+ unsigned int threadid,
+ int n_channels,
+ const void* packed_params,
+ const InputType* inptr,
+ OutputType* outptr,
+ int row_pad_in_top,
+ int row_pad_in_left,
+ int row_pad_in_bottom,
+ int row_pad_out_bottom,
+ int n_tiles,
+ int n_input_cols,
+ int n_output_cols
+ );
+
+ /** Process a single tile of the tensor.
+ *
+ * This method will apply input/output padding (if required) and call the
+ * depthwise tile implementation.
+ */
+ void process_tile(
+ unsigned int threadid,
+ int n_channels,
+ const void* packed_params,
+ const InputType* inptr,
+ OutputType* outptr,
+ int pad_in_top,
+ int pad_in_left,
+ int pad_in_bottom,
+ int pad_in_right,
+ int pad_out_bottom,
+ int pad_out_right
+ );
+
+ /** Perform depthwise convolution on a single tile.
+ */
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const InputType* inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ OutputType* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride
+ );
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const InputType* inptrs[inner_tile_rows][inner_tile_cols],
+ OutputType* outptrs[output_tile_rows][output_tile_cols]
+ );
+
+ int n_channels(void) const;
+
+ private:
+ // Member variables of instances of a convolution engine.
+ const InputType* _input;
+ OutputType* _output;
+ void* _packed_parameters;
+ void* _working_space; // Per-thread working space
+ const int _n_batches, _n_input_rows, _n_input_cols, _n_channels,
+ _n_output_rows, _n_output_cols, _n_tile_rows, _n_tile_cols;
+ const unsigned int _padding_top, _padding_left, _padding_bottom, _padding_right;
+ const nck::ActivationFunction _activation;
+
+ // Stride information for a convolution instance
+ int _input_col_stride, _input_row_stride, _input_batch_stride;
+ int _output_col_stride, _output_row_stride, _output_batch_stride;
+
+ // Methods for getting access to working space
+ size_t _get_input_working_space_size(void) const;
+ size_t _get_output_working_space_size(void) const;
+
+ void *_get_input_working_space(unsigned int threadid) const;
+ void *_get_output_working_space(unsigned int threadid) const;
+};
+
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols,
+ typename TIn, typename TBias, typename TOut
+>
+class DepthwiseConvolution : public DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ TIn, TBias, TOut,
+ DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ TIn, TBias, TOut
+ >
+>
+{
+ using Base = DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ TIn, TBias, TOut,
+ DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ TIn, TBias, TOut
+ > >;
+ friend Base;
+ using InputType = typename Base::InputType;
+ using OutputType = typename Base::OutputType;
+
+ public:
+ using Base::DepthwiseConvolutionBase;
+
+ protected:
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const TIn* inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ TOut* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride
+ );
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const InputType* inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ OutputType* outptrs[Base::output_tile_rows][Base::output_tile_cols]
+ );
+};
+
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols
+>
+class DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float, float, float
+> : public DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float, float, float,
+ DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float, float, float
+ >
+>
+{
+ using Base = DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float, float, float,
+ DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float, float, float
+ > >;
+ friend Base;
+ using InputType = typename Base::InputType;
+ using OutputType = typename Base::OutputType;
+
+ public:
+ DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ protected:
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const float* inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ float* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride
+ );
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const float* inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ float* outptrs[Base::output_tile_rows][Base::output_tile_cols]
+ );
+};
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols
+>
+class DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float16_t, float16_t, float16_t
+> : public DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float16_t, float16_t, float16_t,
+ DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float16_t, float16_t, float16_t
+ >
+>
+{
+ using Base = DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float16_t, float16_t, float16_t,
+ DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ float16_t, float16_t, float16_t
+ > >;
+ friend Base;
+ using InputType = typename Base::InputType;
+ using OutputType = typename Base::OutputType;
+
+ public:
+ DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ protected:
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const float16_t* inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ float16_t* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride
+ );
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const float16_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ float16_t* outptrs[Base::output_tile_rows][Base::output_tile_cols]
+ );
+};
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+} // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp
new file mode 100644
index 0000000000..1bae815613
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <deque>
+#include <functional>
+#include <memory>
+
+#include "depthwise.hpp"
+
+namespace depthwise
+{
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols,
+ typename TIn, typename TBias, typename TOut
+>
+class DilatedDepthwiseConvolution : public IDepthwiseConvolution
+{
+ public:
+ /** Create a new dilated depthwise convolution engine.
+ */
+ DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ // Cannot copy or move a DilatedDepthwiseConvolution.
+ DilatedDepthwiseConvolution(DilatedDepthwiseConvolution&) = delete;
+ DilatedDepthwiseConvolution operator=(DilatedDepthwiseConvolution&) = delete;
+
+ /* Set input tensor and stride. */
+ void set_input(const void *inptr) override;
+ void set_input(const void *inptr, int column_stride) override;
+ void set_input(const void *inptr, int row_stride, int column_stride) override;
+ void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) override;
+
+ /* Set output tensor and stride. */
+ void set_output(void *outptr) override;
+ void set_output(void *outptr, int column_stride) override;
+ void set_output(void *outptr, int row_stride, int column_stride) override;
+ void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) override;
+
+ static int get_output_size(
+ int dim_size,
+ unsigned int padding_before,
+ unsigned int padding_after,
+ int dilation_factor
+ );
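+
+  /* Under the usual convention, a kernel of size K with dilation factor d
+   * covers an effective extent of (K - 1)*d + 1 input elements in each
+   * dimension, which is why the output is smaller than in the non-dilated
+   * case for the same padding.
+   */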
+
+ int output_size(
+ int dim_size, unsigned int padding_before, unsigned int padding_after
+ ) const override;
+
+  /* Weights and biases are re-ordered to improve memory access patterns.
+   * Use these methods to determine the size of the re-pack buffer and to
+   * set the buffer address (packing implicitly re-orders the weights and
+   * biases into that buffer).
+   */
+ size_t get_packed_params_size(void) const override;
+ void set_packed_params_buffer(void *) override;
+
+ void pack_params(const void *weights, const void *biases=nullptr) const override;
+ void pack_params(void *buffer, const void *weights, const void *biases=nullptr) const override;
+ void pack_params(
+ void *buffer,
+ const void* weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const override;
+
+  /* Working space is used to pad tensors on the fly. Before running any
+   * inference, check the amount of space required, allocate it, and provide
+   * a pointer to the convolution engine.
+   */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *) override;
+
+ unsigned int get_window(void) const override;
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+  /** Protected constructor which also accepts a function to construct a new
+   * subconvolution.
+   */
+ DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right,
+ std::function<IDepthwiseConvolution *(int, int, int, int, int, int, nck::ActivationFunction, unsigned int, unsigned int, unsigned int, unsigned int)> subconvfn
+ );
+
+ const int _dilation_factor;
+ const int _n_input_rows, _n_input_cols, _n_channels;
+ const int _padding_top, _padding_left;
+ const int _n_output_rows, _n_output_cols;
+
+ /* Dilated depthwise convolution is performed through repeated calls to
+ * non-dilated convolutions. If the dilation factor is $n$, then we perform
+ * $(n + 1)^2$ depthwise convolutions.
+ */
+ using BaseDepthwise = DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ TIn, TBias, TOut
+ >;
+ std::deque<std::deque<std::unique_ptr<IDepthwiseConvolution>>> _convs;
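+  // Assumed layout (illustrative): _convs[i][j] operates on the sub-tensor
+  // formed from the input elements whose row/column indices equal (i, j)
+  // modulo the dilation factor.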
+};
+
+} // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp
new file mode 100644
index 0000000000..4343f6ad45
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2018-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include "depthwise.hpp"
+#include "qasymm8.hpp"
+#include "qsymm8.hpp"
+
+using namespace neon_convolution_kernels;
+using namespace qasymm8;
+
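+// Fixed-point helpers in the style of gemmlowp. A saturating doubling high
+// multiply computes round((a * b) / 2^31) with saturation, i.e. the high
+// half of the doubled 64-bit product.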
+inline int32x4_t saturating_doubling_high_mul(const int32x4_t& a, const int32x4_t& b)
+{
+ return vqrdmulhq_s32(a, b);
+}
+
+inline int32x4_t saturating_doubling_high_mul(const int32x4_t& a, const int32_t& b)
+{
+ return vqrdmulhq_n_s32(a, b);
+}
+
+inline int32_t saturating_doubling_high_mul(const int32_t& a, const int32_t& b)
+{
+ return vget_lane_s32(vqrdmulh_n_s32(vdup_n_s32(a), b), 0);
+}
+
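+// rounding_divide_by_exp2 computes a correctly-rounded division by a power
+// of two; the fixup term corrects the bias of the rounding shift for
+// negative inputs.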
+inline int32x4_t rounding_divide_by_exp2(const int32x4_t& x, const int32x4_t shift)
+{
+ const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift), 31);
+ const int32x4_t fixed = vqaddq_s32(x, fixup);
+ return vrshlq_s32(fixed, shift);
+}
+
+inline int32x4_t rounding_divide_by_exp2(const int32x4_t& x, const int exponent)
+{
+ const int32x4_t shift = vdupq_n_s32(-exponent);
+ const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift), 31);
+ const int32x4_t fixed = vqaddq_s32(x, fixup);
+ return vrshlq_s32(fixed, shift);
+}
+
+inline int32x2_t rounding_divide_by_exp2(const int32x2_t& x, const int exponent)
+{
+ const int32x2_t shift = vdup_n_s32(-exponent);
+ const int32x2_t fixup = vshr_n_s32(vand_s32(x, shift), 31);
+ const int32x2_t fixed = vqadd_s32(x, fixup);
+ return vrshl_s32(fixed, shift);
+}
+
+inline int32_t rounding_divide_by_exp2(const int32_t& x, const int exponent)
+{
+ const int32x2_t xs = vdup_n_s32(x);
+ return vget_lane_s32(rounding_divide_by_exp2(xs, exponent), 0);
+}
+
+namespace depthwise
+{
+
+namespace nck = neon_convolution_kernels;
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols
+>
+class QAsymm8DepthwiseConvolution : public DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ uint8_t, int32_t, uint8_t,
+ QAsymm8DepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols>
+>
+{
+ using Base = DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ uint8_t, int32_t, uint8_t,
+ QAsymm8DepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols>
+ >;
+ friend Base;
+ using InputType = typename Base::InputType;
+ using OutputType = typename Base::OutputType;
+
+ public:
+ QAsymm8DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ QAsymm8DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ QAsymm8DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ const qasymm8::QAsymm8RescaleParams& rescale_parameters,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ QAsymm8DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ const qasymm8::QAsymm8RescaleParams& rescale_parameters,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ protected:
+ uint8_t _input_padding_value(void) const;
+
+ void _pack_params(
+ void *buffer,
+ const void *weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const;
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const uint8_t* inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ uint8_t* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride
+ );
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const uint8_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ uint8_t* outptrs[Base::output_tile_rows][Base::output_tile_cols]
+ );
+
+ private:
+ // Quantization parameters
+ const qasymm8::QAsymm8Params _weights_quant, _inputs_quant, _output_quant;
+ const qasymm8::QAsymm8RescaleParams rescale_parameters;
+};
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols
+>
+class QSymm8HybridPerChannelDepthwiseConvolution : public DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ uint8_t, int32_t, uint8_t,
+ QSymm8HybridPerChannelDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols>
+>
+{
+ using Base = DepthwiseConvolutionBase<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ uint8_t, int32_t, uint8_t,
+ QSymm8HybridPerChannelDepthwiseConvolution<OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows, StrideCols>
+ >;
+ friend Base;
+ using InputType = typename Base::InputType;
+ using OutputType = typename Base::OutputType;
+
+ public:
+ QSymm8HybridPerChannelDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ const qsymm8::QSymm8PerChannelParams& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ QSymm8HybridPerChannelDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ const qsymm8::QSymm8PerChannelParams& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ const qsymm8::QSymm8PerChannelRescaleParams& rescale_parameters,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
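+  /* Per channel this packs KernelRows x KernelCols int8 weights plus three
+   * int32 words, assumed to hold the bias and the per-channel rescale
+   * (multiplier and shift) parameters.
+   */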
+  size_t get_packed_params_size(void) const override
+  {
+    return this->n_channels() * (sizeof(int8_t)*KernelRows*KernelCols + 3*sizeof(int32_t));
+  }
+
+ protected:
+ uint8_t _input_padding_value(void) const;
+
+ void _pack_params(
+ void *buffer,
+ const void *weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const;
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const uint8_t* inptr,
+ unsigned int in_row_stride,
+ unsigned int in_col_stride,
+ uint8_t* outptr,
+ unsigned int out_row_stride,
+ unsigned int out_col_stride
+ );
+
+ template <nck::ActivationFunction Activation>
+ void execute_tile(
+ int n_channels,
+ const void* packed_params,
+ const uint8_t* inptrs[Base::inner_tile_rows][Base::inner_tile_cols],
+ uint8_t* outptrs[Base::output_tile_rows][Base::output_tile_cols]
+ );
+
+ private:
+ // Quantization parameters
+ const qsymm8::QSymm8PerChannelParams _weights_quant;
+ const qasymm8::QAsymm8Params _input_quant, _output_quant;
+ const qsymm8::QSymm8PerChannelRescaleParams _rescale_parameters;
+};
+
+} // namespace depthwise
diff --git a/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp b/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp
new file mode 100644
index 0000000000..a11b0981c9
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include "depthwise_dilated.hpp"
+#include "depthwise_quantized.hpp"
+
+namespace depthwise {
+
+template <unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols>
+class QAsymm8DilatedDepthwiseConvolution
+ : public DilatedDepthwiseConvolution<
+ OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows,
+ StrideCols, uint8_t, int32_t, uint8_t> {
+public:
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ const qasymm8::QAsymm8RescaleParams &rescale_parameters,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ const qasymm8::QAsymm8RescaleParams& rescale_parameters,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+};
+
+} // namespace depthwise