diff options
Diffstat (limited to 'arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h')
-rw-r--r-- | arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h index 4d23b8bd65..d00e8a46ed 100644 --- a/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h +++ b/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h @@ -53,7 +53,7 @@ public: NEDepthwiseWeightsReshapeKernel &operator=(NEDepthwiseWeightsReshapeKernel &&) = default; /** Set the input and output of the kernel. * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: F32. + * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: QASYMM8, F32. * @param[out] output The output tensor. Data type supported: same as @p input. * @param[in] biases (Optional) The input biases to add. Shape [IFM]. Data type supported: same as @p input. */ @@ -63,9 +63,13 @@ public: void run(const Window &window, const ThreadInfo &info) override; private: - const ITensor *_input; - ITensor *_output; - const ITensor *_biases; + using DepthwiseWeightsReshapeFunction = void(const ITensor *input, const ITensor *bias, ITensor *output, const Window &window); + +private: + DepthwiseWeightsReshapeFunction *_func; + const ITensor *_input; + ITensor *_output; + const ITensor *_biases; }; } // arm_compute #endif /*__ARM_COMPUTE_NEDEPTHWISEWEIGHTSRESHAPEKERNEL_H__ */ |