aboutsummaryrefslogtreecommitdiff
path: root/arm_compute
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2019-06-24 14:56:34 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2019-07-09 09:31:37 +0000
commit30271c779c36a2abe6995c4454674d92bbc1f91f (patch)
tree531257ff87cf2cb8d6f3b8da0abe3e6cb77a2a0e /arm_compute
parent30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3 (diff)
downloadComputeLibrary-30271c779c36a2abe6995c4454674d92bbc1f91f.tar.gz
COMPMID-2156: Optimized dilated convolution for NEON.
Change-Id: I3a8abe8cc9637c8983d9bd69dcbaee1a15eac8d0 Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com> Reviewed-on: https://review.mlplatform.org/c/1492 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp41
-rw-r--r--arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp156
-rw-r--r--arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp38
-rw-r--r--arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp88
-rw-r--r--arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp32
-rw-r--r--arm_compute/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp295
-rw-r--r--arm_compute/graph/backends/FunctionHelpers.h2
-rw-r--r--arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h119
-rw-r--r--arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h39
9 files changed, 782 insertions, 28 deletions
diff --git a/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp
index e0cb616a3d..a4a833d90a 100644
--- a/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp
@@ -25,8 +25,8 @@
#pragma once
#include <arm_neon.h>
-#include "arm_compute/core/NEON/kernels/convolution/common/activation.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/padding.hpp"
+#include "activation.hpp"
+#include "padding.hpp"
namespace depthwise
{
@@ -127,6 +127,23 @@ class DepthwiseConvolutionBase : public IDepthwiseConvolution
unsigned int padding_right
);
+ /** Create a new depthwise convolution engine.
+ *
+ * @param[in] n_batches Number of batches tensors.
+ * @param[in] n_input_rows Number of rows in input tensor.
+ * @param[in] n_input_cols Number of columns in input tensor.
+ * @param[in] n_channels Number of channels in input and output tensors.
+ */
+ DepthwiseConvolutionBase(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
// Cannot copy or move a DepthwiseConvolution.
DepthwiseConvolutionBase(DepthwiseConvolutionBase&) = delete;
DepthwiseConvolutionBase operator=(DepthwiseConvolutionBase&) = delete;
@@ -417,6 +434,16 @@ class DepthwiseConvolution<
unsigned int padding_right
);
+ DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
protected:
template <nck::ActivationFunction Activation>
void execute_tile(
@@ -488,6 +515,16 @@ class DepthwiseConvolution<
unsigned int padding_right
);
+ DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
protected:
template <nck::ActivationFunction Activation>
void execute_tile(
diff --git a/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp
new file mode 100644
index 0000000000..e0d7f0c7f1
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include <deque>
+#include <functional>
+#include <memory>
+
+#include "depthwise.hpp"
+
+namespace depthwise
+{
+
+template <
+ unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols,
+ typename TIn, typename TBias, typename TOut
+>
+class DilatedDepthwiseConvolution : public IDepthwiseConvolution
+{
+ public:
+ /** Create a new dilated depthwise convolution engine.
+ */
+ DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ // Cannot copy or move a DilatedDepthwiseConvolution.
+ DilatedDepthwiseConvolution(DilatedDepthwiseConvolution&) = delete;
+ DilatedDepthwiseConvolution operator=(DilatedDepthwiseConvolution&) = delete;
+
+ /* Set input tensor and stride. */
+ void set_input(const void *inptr) override;
+ void set_input(const void *inptr, int column_stride) override;
+ void set_input(const void *inptr, int row_stride, int column_stride) override;
+ void set_input(const void *inptr, int batch_stride, int row_stride, int column_stride) override;
+
+ /* Set output tensor and stride. */
+ void set_output(void *outptr) override;
+ void set_output(void *outptr, int column_stride) override;
+ void set_output(void *outptr, int row_stride, int column_stride) override;
+ void set_output(void *outptr, int batch_stride, int row_stride, int column_stride) override;
+
+ static int get_output_size(
+ int dim_size,
+ unsigned int padding_before,
+ unsigned int padding_after,
+ int dilation_factor
+ );
+
+ int output_size(
+ int dim_size, unsigned int padding_before, unsigned int padding_after
+ ) const override;
+
+ /* Weights and biases are re-ordered to improve memory access patterns. Use
+ * these methods to determine the size of the re-pack buffer and to set the
+ * address (and implicitly reorder the weights and biases into) the buffer.
+ */
+ size_t get_packed_params_size(void) const override;
+ void set_packed_params_buffer(void *) override;
+
+ void pack_params(const void *weights, const void *biases=nullptr) const override;
+ void pack_params(void *buffer, const void *weights, const void *biases=nullptr) const override;
+ void pack_params(
+ void *buffer,
+ const void* weights,
+ unsigned int weight_row_stride,
+ unsigned int weight_col_stride,
+ const void *biases=nullptr
+ ) const override;
+
+ /* Working space is used to pad tensors on the fly. Before running any
+ * inference check the amount of space required, allocate and provide a
+ * pointer to the convolution engine.
+ */
+ size_t get_working_space_size(unsigned int nthreads=1) const override;
+ void set_working_space(void *) override;
+
+ unsigned int get_window(void) const override;
+ void run(unsigned int start, unsigned int stop, unsigned int threadid=0) override;
+
+ protected:
+ /** Protected constructor which also accepts a function to construct a new
+ * subconvolution
+ */
+ DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right,
+ std::function<IDepthwiseConvolution *(int, int, int, int, int, int, nck::ActivationFunction, unsigned int, unsigned int, unsigned int, unsigned int)> subconvfn
+ );
+
+ const int _dilation_factor;
+ const int _n_input_rows, _n_input_cols, _n_channels;
+ const int _padding_top, _padding_left;
+ const int _n_output_rows, _n_output_cols;
+
+ /* Dilated depthwise convolution is performed through repeated calls to
+ * non-dilated convolutions. If the dilation factor is $n$, then we perform
+ * $(n + 1)^2$ depthwise convolutions.
+ */
+ using BaseDepthwise = DepthwiseConvolution<
+ OutputTileRows, OutputTileCols,
+ KernelRows, KernelCols,
+ StrideRows, StrideCols,
+ TIn, TBias, TOut
+ >;
+ std::deque<std::deque<std::unique_ptr<IDepthwiseConvolution>>> _convs;
+};
+
+} // namespace depthwise
diff --git a/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp
index e34023faf1..b65ced6f35 100644
--- a/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,8 +23,8 @@
*/
#pragma once
-#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/qasymm8.hpp"
+#include "depthwise.hpp"
+#include "qasymm8.hpp"
namespace depthwise
{
@@ -70,6 +70,33 @@ class QAsymm8DepthwiseConvolution : public DepthwiseConvolutionBase<
QAsymm8DepthwiseConvolution(
int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ QAsymm8DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& weight_quantisation,
+ const qasymm8::QAsymm8Params& input_quantisation,
+ const qasymm8::QAsymm8Params& output_quantisation,
+ const qasymm8::QAsymm8RescaleParams& rescale_parameters,
+ unsigned int padding_top,
+ unsigned int padding_left,
+ unsigned int padding_bottom,
+ unsigned int padding_right
+ );
+
+ QAsymm8DepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int n_output_rows, int n_output_cols,
nck::ActivationFunction activation,
const qasymm8::QAsymm8Params& weight_quantisation,
const qasymm8::QAsymm8Params& input_quantisation,
@@ -82,6 +109,11 @@ class QAsymm8DepthwiseConvolution : public DepthwiseConvolutionBase<
);
protected:
+ static nck::ActivationFunction get_activation_fn(
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params& output_quantisation
+ );
+
uint8_t _input_padding_value(void) const;
void _pack_params(
diff --git a/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp
new file mode 100644
index 0000000000..cf1c6f581f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include "depthwise_dilated.hpp"
+#include "depthwise_quantized.hpp"
+
+namespace depthwise {
+
+template <unsigned int OutputTileRows, unsigned int OutputTileCols,
+ unsigned int KernelRows, unsigned int KernelCols,
+ unsigned int StrideRows, unsigned int StrideCols>
+class QAsymm8DilatedDepthwiseConvolution
+ : public DilatedDepthwiseConvolution<
+ OutputTileRows, OutputTileCols, KernelRows, KernelCols, StrideRows,
+ StrideCols, uint8_t, int32_t, uint8_t> {
+public:
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ const qasymm8::QAsymm8RescaleParams &rescale_parameters,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+
+ /** Create a new dilated depthwise convolution engine.
+ */
+ QAsymm8DilatedDepthwiseConvolution(
+ int n_batches, int n_input_rows, int n_input_cols, int n_channels,
+ int dilation_factor, int n_output_rows, int n_output_cols,
+ nck::ActivationFunction activation,
+ const qasymm8::QAsymm8Params &weight_quantisation,
+ const qasymm8::QAsymm8Params &input_quantisation,
+ const qasymm8::QAsymm8Params &output_quantisation,
+ const qasymm8::QAsymm8RescaleParams& rescale_parameters,
+ unsigned int padding_top, unsigned int padding_left,
+ unsigned int padding_bottom, unsigned int padding_right);
+};
+
+} // namespace depthwise
diff --git a/arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp b/arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp
index 493b2991dc..b102a24250 100644
--- a/arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/depthwise/impl_base.hpp
@@ -32,9 +32,9 @@
#include <algorithm>
#include <cstdint>
-#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/padding.hpp"
-#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
+#include "depthwise.hpp"
+#include "padding.hpp"
+#include "utils.hpp"
#pragma once
@@ -95,6 +95,28 @@ MEMBERFN()::DepthwiseConvolutionBase(
const unsigned int padding_left,
const unsigned int padding_bottom,
const unsigned int padding_right
+) : DepthwiseConvolutionBase(
+ n_batches, n_input_rows, n_input_cols, n_channels,
+ get_output_size(n_input_rows, padding_top, padding_bottom),
+ get_output_size(n_input_cols, padding_left, padding_right),
+ activation,
+ padding_top, padding_left, padding_bottom, padding_right
+ )
+{
+}
+
+MEMBERFN()::DepthwiseConvolutionBase(
+ const int n_batches,
+ const int n_input_rows,
+ const int n_input_cols,
+ const int n_channels,
+ const int n_output_rows,
+ const int n_output_cols,
+ ActivationFunction activation,
+ const unsigned int padding_top,
+ const unsigned int padding_left,
+ const unsigned int padding_bottom,
+ const unsigned int padding_right
) : _input(nullptr), _output(nullptr),
_packed_parameters(nullptr),
_working_space(nullptr),
@@ -102,8 +124,8 @@ MEMBERFN()::DepthwiseConvolutionBase(
_n_input_rows(n_input_rows),
_n_input_cols(n_input_cols),
_n_channels(n_channels),
- _n_output_rows(get_output_size(n_input_rows, padding_top, padding_bottom)),
- _n_output_cols(get_output_size(n_input_cols, padding_left, padding_right)),
+ _n_output_rows(n_output_rows),
+ _n_output_cols(n_output_cols),
_n_tile_rows(iceildiv(_n_output_rows, output_tile_rows)),
_n_tile_cols(iceildiv(_n_output_cols, output_tile_cols)),
_padding_top(padding_top),
diff --git a/arm_compute/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp b/arm_compute/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp
new file mode 100644
index 0000000000..2ef965ba4b
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/convolution/depthwise/impl_dilated.hpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "depthwise_dilated.hpp"
+#include "utils.hpp"
+
+#define MEMBERFN(TOUT) \
+ template <unsigned int OutputTileRows, unsigned int OutputTileColumns, \
+ unsigned int KernelRows, unsigned int KernelColumns, \
+ unsigned int StrideRows, unsigned int StrideColumns, typename TIn, \
+ typename TBias, typename TOut> \
+ TOUT DilatedDepthwiseConvolution<OutputTileRows, OutputTileColumns, \
+ KernelRows, KernelColumns, StrideRows, \
+ StrideColumns, TIn, TBias, TOut>
+
+namespace depthwise {
+
+MEMBERFN()
+::DilatedDepthwiseConvolution(const int n_batches, const int n_input_rows,
+ const int n_input_cols, const int n_channels,
+ const int dilation_factor,
+ nck::ActivationFunction activation,
+ const unsigned int padding_top,
+ const unsigned int padding_left,
+ const unsigned int padding_bottom,
+ const unsigned int padding_right)
+ : DilatedDepthwiseConvolution(
+ n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor,
+ DilatedDepthwiseConvolution::get_output_size(
+ n_input_rows, padding_top, padding_bottom, dilation_factor),
+ DilatedDepthwiseConvolution::get_output_size(
+ n_input_cols, padding_left, padding_right, dilation_factor),
+ activation, padding_top, padding_left, padding_bottom,
+ padding_right) {}
+
+MEMBERFN()
+::DilatedDepthwiseConvolution(const int n_batches, const int n_input_rows,
+ const int n_input_cols, const int n_channels,
+ const int dilation_factor,
+ const int n_output_rows, const int n_output_cols,
+ nck::ActivationFunction activation,
+ const unsigned int padding_top,
+ const unsigned int padding_left,
+ const unsigned int, // padding_bottom
+ const unsigned int // padding_right
+ )
+ : DilatedDepthwiseConvolution(
+ n_batches, n_input_rows, n_input_cols, n_channels, dilation_factor,
+ n_output_rows, n_output_cols, activation, padding_top, padding_left,
+ 0, 0,
+ // Function which creates a new (standard) depthwise convolution
+ [](const int n_batches, const int n_input_rows,
+ const int n_input_cols, const int n_channels,
+ const int n_output_rows, const int n_output_cols,
+ const nck::ActivationFunction activation,
+ const unsigned int padding_top, const unsigned int padding_left,
+ const unsigned int padding_bottom,
+ const unsigned int padding_right) -> IDepthwiseConvolution * {
+ return new DepthwiseConvolution<
+ OutputTileRows, OutputTileColumns, KernelRows, KernelColumns,
+ StrideRows, StrideColumns, TIn, TBias, TOut>(
+ n_batches, n_input_rows, n_input_cols, n_channels,
+ n_output_rows, n_output_cols, activation, padding_top,
+ padding_left, padding_bottom, padding_right);
+ }) {}
+
+MEMBERFN()
+::DilatedDepthwiseConvolution(
+ const int n_batches, const int n_input_rows, const int n_input_cols,
+ const int n_channels, const int dilation_factor, const int n_output_rows,
+ const int n_output_cols, nck::ActivationFunction activation,
+ const unsigned int padding_top, const unsigned int padding_left,
+ const unsigned int, // padding_bottom
+ const unsigned int, // padding_right
+ std::function<IDepthwiseConvolution *(
+ int, int, int, int, int, int, nck::ActivationFunction, unsigned int,
+ unsigned int, unsigned int, unsigned int)>
+ subconvfn // Function to create a new convolution
+ )
+ : _dilation_factor(dilation_factor), _n_input_rows(n_input_rows),
+ _n_input_cols(n_input_cols), _n_channels(n_channels),
+ _padding_top(static_cast<int>(padding_top)),
+ _padding_left(static_cast<int>(padding_left)),
+ _n_output_rows(n_output_rows), _n_output_cols(n_output_cols),
+ _convs(_dilation_factor) {
+ // Instantiate the base convolutions
+ for (int i = 0; i < _dilation_factor; i++) {
+ // Compute properties of this row of base convolutions
+ const int row_top =
+ i * StrideRows - _padding_top; // -ve values are in the padding
+ const int row_pad_top =
+ row_top < 0 ? iceildiv(-row_top, dilation_factor) : 0;
+
+ const int _n_input_rows = iceildiv(n_input_rows - i, dilation_factor);
+ const int _n_output_rows = iceildiv(n_output_rows - i, dilation_factor);
+
+ for (int j = 0; j < _dilation_factor; j++) {
+ // Compute properties of the base convolution
+ const int col_left =
+ j * StrideColumns - padding_left; // -ve values are in the padding
+ const int col_pad_left =
+ col_left < 0 ? iceildiv(-col_left, dilation_factor) : 0;
+
+ const int _n_input_cols = iceildiv(n_input_cols - j, dilation_factor);
+ const int _n_output_cols = iceildiv(n_output_cols - j, dilation_factor);
+
+ // Create new depthwise convolution engine and include it in the vector
+ // of engines. The new depthwise convolution engine is created by calling
+ // the delegate function we received as an argument.
+ _convs[i].emplace_back(subconvfn(
+ n_batches, _n_input_rows, _n_input_cols, n_channels, _n_output_rows,
+ _n_output_cols, activation,
+ // Note: since we have computed the output tensor size we don't need
+ // to explicitly provide bottom and right padding values to the
+ // depthwise convolution.
+ row_pad_top, col_pad_left, 0, 0));
+ }
+ }
+}
+
+MEMBERFN(void)::set_input(const void *const inptr) {
+ set_input(inptr, _n_channels);
+}
+
+MEMBERFN(void)::set_input(const void *const inptr, const int ldcol) {
+ set_input(inptr, _n_input_cols * ldcol, ldcol);
+}
+
+MEMBERFN(void)
+::set_input(const void *const inptr, const int ldrow, const int ldcol) {
+ set_input(inptr, _n_input_rows * ldrow, ldrow, ldcol);
+}
+
+MEMBERFN(void)
+::set_input(const void *const inptr, const int ldbatch, const int ldrow,
+ const int ldcol) {
+ // Compute dilated strides
+ const int ldrow_dilated = ldrow * _dilation_factor;
+ const int ldcol_dilated = ldcol * _dilation_factor;
+
+ // Pass input parameters on to base convolutions
+ for (int i = 0; i < _dilation_factor; i++) {
+ const int top_pos =
+ i * StrideRows - _padding_top +
+ ((static_cast<int>(i * StrideRows) < _padding_top)
+ ? iceildiv(_padding_top - i * StrideRows, _dilation_factor) *
+ _dilation_factor
+ : 0);
+ const TIn *const inptr_i =
+ static_cast<const TIn *>(inptr) + top_pos * ldrow;
+
+ for (int j = 0; j < _dilation_factor; j++) {
+ int left_pos = j * StrideColumns - _padding_left;
+ while (left_pos < 0)
+ left_pos += _dilation_factor;
+
+ // Modify the pointer to point to the first element of the dilated input
+ // tensor, then set the input for this convolution engine.
+ const void *const inptr_ij = inptr_i + left_pos * ldcol;
+ _convs[i][j]->set_input(inptr_ij, ldbatch, ldrow_dilated, ldcol_dilated);
+ }
+ }
+}
+
+MEMBERFN(void)::set_output(void *const outptr) {
+ set_output(outptr, _n_channels);
+}
+
+MEMBERFN(void)::set_output(void *const outptr, const int ldcol) {
+ set_output(outptr, _n_output_cols * ldcol, ldcol);
+}
+
+MEMBERFN(void)
+::set_output(void *const outptr, const int ldrow, const int ldcol) {
+ set_output(outptr, _n_output_rows * ldrow, ldrow, ldcol);
+}
+
+MEMBERFN(void)
+::set_output(void *const outptr, const int ldbatch, const int ldrow,
+ const int ldcol) {
+ // Compute dilated strides
+ const int ldrow_dilated = ldrow * _dilation_factor;
+ const int ldcol_dilated = ldcol * _dilation_factor;
+
+ // Pass input parameters on to base convolutions
+ for (int i = 0; i < _dilation_factor; i++) {
+ for (int j = 0; j < _dilation_factor; j++) {
+ // Modify the pointer to point to the first element of the dilated input
+ // tensor, then set the input for this convolution engine.
+ void *const outptr_ij =
+ static_cast<TOut *>(outptr) + i * ldrow + j * ldcol;
+ _convs[i][j]->set_output(outptr_ij, ldbatch, ldrow_dilated,
+ ldcol_dilated);
+ }
+ }
+}
+
+MEMBERFN(int)
+::get_output_size(const int dim_size, const unsigned int padding_before,
+ const unsigned int padding_after, const int dilation_factor) {
+ const int input_size =
+ dim_size + static_cast<int>(padding_before + padding_after);
+ const int window_size = (KernelRows - 1) * dilation_factor + 1;
+ return iceildiv(input_size - window_size + 1, StrideRows);
+}
+
+MEMBERFN(int)
+::output_size(const int dim_size, const unsigned int padding_before,
+ const unsigned int padding_after) const {
+ return get_output_size(dim_size, padding_before, padding_after,
+ _dilation_factor);
+}
+
+MEMBERFN(size_t)::get_packed_params_size(void) const {
+ return _convs[0][0]->get_packed_params_size();
+}
+
+MEMBERFN(void)::set_packed_params_buffer(void *buffer) {
+ // Set the buffer for all convolution engines
+ for (auto &&row : _convs) {
+ for (auto &&conv : row) {
+ conv->set_packed_params_buffer(buffer);
+ }
+ }
+}
+
+MEMBERFN(void)
+::pack_params(const void *const weights, const void *const biases) const {
+ _convs[0][0]->pack_params(weights, biases);
+}
+
+MEMBERFN(void)
+::pack_params(void *const buffer, const void *const weights,
+ const void *const biases) const {
+ _convs[0][0]->pack_params(buffer, weights, biases);
+}
+
+MEMBERFN(void)
+::pack_params(void *const buffer, const void *const weights,
+ const unsigned int ldrow, const unsigned int ldcol,
+ const void *const biases) const {
+ _convs[0][0]->pack_params(buffer, weights, ldrow, ldcol, biases);
+}
+
+MEMBERFN(size_t)::get_working_space_size(unsigned int nthreads) const {
+ return _convs[0][0]->get_working_space_size(nthreads);
+}
+
+MEMBERFN(void)::set_working_space(void *const ws) {
+ // Use the same working space set for all contained depthwise engines.
+ for (auto &&row : _convs) {
+ for (auto &&conv : row) {
+ conv->set_working_space(ws);
+ }
+ }
+}
+
+MEMBERFN(unsigned int)::get_window(void) const {
+ return _convs[0][0]->get_window();
+}
+
+MEMBERFN(void)
+::run(const unsigned int start, const unsigned int stop,
+ const unsigned int threadid) {
+ // Run each contained convolution in turn
+ for (auto &&row : _convs) {
+ for (auto &&conv : row) {
+ conv->run(start, stop, threadid);
+ }
+ }
+}
+
+} // namespace depthwise
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 785f6dc3b9..fbf8d17f67 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -523,7 +523,7 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
std::string func_name;
if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
{
- std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
+ std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::OptimizedDepthwiseConvolutionLayer>(
std::string("DepthwiseConvolutionLayer3x3"),
input, weights, biases, output, conv_info, depth_multiplier, fused_act);
}
diff --git a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
index 396e2368c3..81bf53ace6 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
@@ -42,6 +42,7 @@
namespace arm_compute
{
+// Forward declarations
class ITensor;
/** Basic function to execute a depthwise convolution for kernel size 3x3xC. This function calls the following NEON kernels:
@@ -157,6 +158,124 @@ private:
bool _is_prepared;
};
+/** Basic function to execute optimized depthwise convolution routines. This function calls the following NEON kernels:
+ *
+ * @note At the moment 3x3 and 5x5 convolutions of stride 1 and 2 are supported
+ *
+ * -# @ref NEFillBorderKernel (if pad_x or pad_y > 0) and no assembly kernel implementation is present
+ * -# @ref NEDepthwiseConvolutionLayer3x3Kernel if 3x3 and no assembly kernel implementation is present
+ * -# @ref NEDepthwiseConvolutionAssemblyDispatch if assembly kernel implementation is present
+ * -# @ref NEDirectConvolutionLayerOutputStageKernel if re-quantization of output is required
+ * -# @ref NEActivationLayer if fused activation is required
+ *
+ */
+class NEDepthwiseConvolutionLayerOptimized : public IFunction
+{
+public:
+ /** Default constructor */
+ NEDepthwiseConvolutionLayerOptimized(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEDepthwiseConvolutionLayerOptimized(const NEDepthwiseConvolutionLayerOptimized &) = delete;
+ /** Default move constructor */
+ NEDepthwiseConvolutionLayerOptimized(NEDepthwiseConvolutionLayerOptimized &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEDepthwiseConvolutionLayerOptimized &operator=(const NEDepthwiseConvolutionLayerOptimized &) = delete;
+ /** Default move assignment operator */
+ NEDepthwiseConvolutionLayerOptimized &operator=(NEDepthwiseConvolutionLayerOptimized &&) = default;
+ /** Initialize the function's source, destination, kernels and border_size.
+ *
+ * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [W, H, IFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
+ * Data type supported: Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
+ * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ */
+ void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEDepthwiseConvolutionLayerOptimized
+ *
+ * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [W, H, IFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
+ * Data type supported: Same as @p input.
+ * @param[in] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
+ * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ /** Configure the kernels/functions for the generic pipeline.
+ *
+ * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [W, H, IFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
+ * Data type supported: Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
+ * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth.
+ * @param[in] act_info Activation layer information in case of a fused activation.
+ * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ *
+ */
+ void configure_generic(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation = Size2D(1U, 1U));
+ /** Configure the kernels/functions for the optimized pipeline.
+ *
+ * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [W, H, IFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
+ * Data type supported: Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
+ * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth.
+ * @param[in] act_info Activation layer information in case of a fused activation.
+ */
+ void configure_optimized(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation = Size2D(1U, 1U));
+ /** Run generic kernel */
+ void run_generic();
+ /** Run optimized function */
+ void run_optimized();
+
+private:
+ MemoryGroup _memory_group;
+ NEDepthwiseConvolutionLayer3x3Kernel _dwc_kernel;
+ NEDepthwiseConvolutionAssemblyDispatch _dwc_optimized_func;
+ NEDirectConvolutionLayerOutputStageKernel _output_stage_kernel;
+ NEFillBorderKernel _border_handler;
+ NEPermute _permute_input;
+ NEPermute _permute_weights;
+ NEPermute _permute_output;
+ NEActivationLayer _activationlayer_function;
+ Tensor _accumulator;
+ Tensor _permuted_input;
+ Tensor _permuted_weights;
+ Tensor _permuted_output;
+ const ITensor *_original_weights;
+ bool _has_bias;
+ bool _is_quantized;
+ bool _is_optimized;
+ bool _is_nchw;
+ bool _permute;
+ bool _is_activationlayer_enabled;
+ bool _is_prepared;
+};
+
/** Basic function to execute a generic depthwise convolution. This function calls the following NEON kernels:
*
* -# @ref NEDepthwiseIm2ColKernel
diff --git a/arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h
index 7d2cff7315..b88e750fa9 100644
--- a/arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h
@@ -30,9 +30,6 @@
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/core/NEON/kernels/assembly/NEDepthwiseConvolutionAssemblyKernelWrapper.h"
-#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise.hpp"
-
namespace arm_compute
{
/** Depthwise convolution assembly kernel glue */
@@ -52,38 +49,44 @@ public:
NEDepthwiseConvolutionAssemblyDispatch &operator=(const NEDepthwiseConvolutionAssemblyDispatch &) = delete;
/** Default move assignment operator */
NEDepthwiseConvolutionAssemblyDispatch &operator=(NEDepthwiseConvolutionAssemblyDispatch &&) = default;
+ /** Default destructor */
+ ~NEDepthwiseConvolutionAssemblyDispatch();
/** Initialize the function's source, destination, kernels and border_size.
*
* @note Supports only NHWC format
*
* @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. These are 3D tensors with shape [3, 3, IFM]. Data type supported: Same as @p input.
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [W, H, IFM]. Data type supported: Same as @p input.
* @param[in] bias (Optional) Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
* Data type supported: Same as @p input.
* @param[out] output Destination tensor. Data type supported: same as @p input.
* @param[in] conv_info Padding and stride information to use for the convolution.
* @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
*/
void configure(const ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
+ const Size2D &dilation = Size2D(1, 1));
/** Static function to check if given info will lead to a valid configuration of @ref NEDepthwiseConvolutionAssemblyDispatch
*
* @note Supports only NHWC format
*
* @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. These are 3D tensors with shape [3, 3, IFM]. Data type supported: Same as @p input.
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [W, H, IFM]. Data type supported: Same as @p input.
* @param[in] bias (Optional) Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
* Data type supported: Same as @p input.
* @param[out] output Destination tensor. Data type supported: same as @p input.
* @param[in] conv_info Padding and stride information to use for the convolution.
* @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
*
* @return An error status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
+ const Size2D &dilation = Size2D(1, 1));
/** Check if the optimized kernel can be used for the given kernel sizes and strides
*
* @warning Even if this return true the inputs and outputs might need to get permuted as the only layout supported is NHWC
@@ -103,16 +106,18 @@ public:
void prepare() override;
private:
- MemoryGroup _memory_group;
- const ITensor *_input;
- const ITensor *_weights;
- const ITensor *_bias;
- ITensor *_output;
- Tensor _packed_weights;
- Tensor _workspace;
- bool _is_prepared;
- std::unique_ptr<depthwise::IDepthwiseConvolution> _dwc_assembly_kernel;
- NEDepthwiseConvolutionAssemblyKernelWrapper _dwc_acl_kernel;
+ struct LocalImpl;
+
+private:
+ MemoryGroup _memory_group;
+ const ITensor *_input;
+ const ITensor *_weights;
+ const ITensor *_bias;
+ ITensor *_output;
+ Tensor _packed_weights;
+ Tensor _workspace;
+ bool _is_prepared;
+ std::unique_ptr<LocalImpl> _pImpl;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEDEPTHWISECONVOLUTIONASSEMBLYDISPATCH_H__ */