aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp156
-rw-r--r--src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h149
-rw-r--r--src/runtime/NEON/functions/NEPoolingLayer.cpp103
-rw-r--r--src/runtime/cpu/operators/CpuPooling.cpp130
-rw-r--r--src/runtime/cpu/operators/CpuPooling.h102
-rw-r--r--src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp98
-rw-r--r--src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h89
7 files changed, 443 insertions, 384 deletions
diff --git a/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp
deleted file mode 100644
index 427cd2eb77..0000000000
--- a/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h"
-
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/NEON/kernels/assembly/NEPoolingAssemblyWrapperKernel.h"
-
-namespace arm_compute
-{
-namespace experimental
-{
-NEPoolingAssemblyDispatch::~NEPoolingAssemblyDispatch() = default;
-
-void NEPoolingAssemblyDispatch::configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info)
-{
- const CPUInfo &ci = NEScheduler::get().cpu_info();
- const unsigned int num_threads = NEScheduler::get().num_threads();
-
- // If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
- if(!NEPoolingAssemblyDispatch::validate(input, output, info))
- {
- return;
- }
-
- auto pooling_wrapper = std::make_unique<NEPoolingAssemblyWrapperKernel>();
- ARM_COMPUTE_ERROR_ON(pooling_wrapper == nullptr);
- pooling_wrapper->configure(input, output, info, ci);
-
- // Check if we have Global Pooling Layer
- _is_global_pooling_layer = (input->dimension(2) == info.pool_size.width) && (input->dimension(1) == info.pool_size.height);
-
- // Set workspace requirements
- const unsigned int alignment = 4096;
- _workspace.push_back(MemoryInfo(TensorType::ACL_DST_1, pooling_wrapper->get_working_size(num_threads), alignment));
-
- _kernel = std::move(pooling_wrapper);
-}
-
-Status NEPoolingAssemblyDispatch::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info)
-{
- return NEPoolingAssemblyWrapperKernel::validate(input, output, info);
-}
-
-bool NEPoolingAssemblyDispatch::is_configured() const
-{
- return _kernel != nullptr;
-}
-
-void NEPoolingAssemblyDispatch::run(ITensorPack &tensors)
-{
- if(tensors.empty())
- {
- ARM_COMPUTE_ERROR("No inputs provided");
- }
-
- if(_is_global_pooling_layer)
- {
- NEScheduler::get().schedule_op(_kernel.get(), Window::DimX, _kernel->window(), tensors);
- }
- else
- {
- NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
- }
-}
-} // namespace experimental
-
-struct NEPoolingAssemblyDispatch::Impl
-{
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- ITensor *workspace{ nullptr };
- std::unique_ptr<experimental::NEPoolingAssemblyDispatch> op{ nullptr };
-};
-
-NEPoolingAssemblyDispatch::NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&) = default;
-
-NEPoolingAssemblyDispatch &NEPoolingAssemblyDispatch::operator=(NEPoolingAssemblyDispatch &&) = default;
-
-NEPoolingAssemblyDispatch::~NEPoolingAssemblyDispatch() = default;
-
-NEPoolingAssemblyDispatch::NEPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
- : _impl(std::make_unique<Impl>()),
- _memory_group(std::move(memory_manager)),
- _workspace()
-{
-}
-
-void NEPoolingAssemblyDispatch::configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- _impl->src = input;
- _impl->dst = output;
- _impl->workspace = &_workspace;
-
- _impl->op = std::make_unique<experimental::NEPoolingAssemblyDispatch>();
- _impl->op->configure(input->info(), output->info(), info);
-
- const auto workspace = _impl->op->workspace().at(0);
- if(workspace.size > 0)
- {
- // Allocate workspace
- allocate_workspace(workspace.size, workspace.alignment);
- }
-}
-
-Status NEPoolingAssemblyDispatch::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info)
-{
- return experimental::NEPoolingAssemblyDispatch::validate(input, output, info);
-}
-
-bool NEPoolingAssemblyDispatch::is_configured() const
-{
- return _impl->op->is_configured();
-}
-
-void NEPoolingAssemblyDispatch::run()
-{
- ITensorPack pack;
- pack.add_tensor(TensorType::ACL_SRC, _impl->src);
- pack.add_tensor(TensorType::ACL_DST_0, _impl->dst);
- pack.add_tensor(TensorType::ACL_DST_1, _impl->workspace);
- _impl->op->run(pack);
-}
-
-void NEPoolingAssemblyDispatch::allocate_workspace(size_t workspace_size, size_t alignment)
-{
- ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
- _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
- _memory_group.manage(&_workspace);
- _workspace.allocator()->allocate();
-}
-} //namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h b/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h
deleted file mode 100644
index f6d232b931..0000000000
--- a/src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H
-#define ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H
-
-#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/IMemoryManager.h"
-#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/INEOperator.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-// Forward Declarations
-class ITensor;
-struct PoolingLayerInfo;
-
-/** Assembly kernel glue */
-class NEPoolingAssemblyDispatch : public IFunction
-{
-public:
- /** Constructor */
- NEPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEPoolingAssemblyDispatch(const NEPoolingAssemblyDispatch &) = delete;
- /** Default move constructor */
- NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&);
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEPoolingAssemblyDispatch &operator=(const NEPoolingAssemblyDispatch &) = delete;
- /** Default move assignment operator */
- NEPoolingAssemblyDispatch &operator=(NEPoolingAssemblyDispatch &&);
- /** Destructor */
- ~NEPoolingAssemblyDispatch();
-
- /** If supported create an assembly routine, else fallback to Compute Library function.
- *
- * @param[in] input Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[out] output Output tensor to store the result of pooling. Data types supported: same as @p input.
- * @param[in] info Pooling meta-data
- */
- void configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &info);
-
- /** Indicates whether or not this function can be used to process the given parameters.
- *
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] output Output tensor to store the result of pooling. Data types supported: same as @p input.
- * @param[in] info Pooling meta-data
- *
- * @return a status.
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);
-
- /** Was the function successfully configured ?
- *
- * @return True if the function is configured and ready to run
- */
- bool is_configured() const;
-
- // Inherited methods overridden:
- void run() override;
-
-private:
- /** Helper function to allocate memory for the workspace needed by the
- * assembly kernels
- *
- * @param[in] workspace_size Total size of the workspace.
- * @param[in] alignment Alignment requirement in bytes.
- */
- void allocate_workspace(size_t workspace_size, size_t alignment);
-
- struct Impl;
- std::unique_ptr<Impl> _impl;
-
- MemoryGroup _memory_group{};
- Tensor _workspace{};
-};
-
-namespace experimental
-{
-/** Basic function to run pooling assembly kernels */
-class NEPoolingAssemblyDispatch : public INEOperator
-{
-public:
- /** Constructor */
- NEPoolingAssemblyDispatch() = default;
- /** Prevent instances of this class from being copied */
- NEPoolingAssemblyDispatch(const NEPoolingAssemblyDispatch &) = delete;
- /** Default move constructor */
- NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&) = default;
- /** Prevent instances of this class from being copied */
- NEPoolingAssemblyDispatch &operator=(const NEPoolingAssemblyDispatch &) = delete;
- /** Default move assignment operator */
- NEPoolingAssemblyDispatch &operator=(NEPoolingAssemblyDispatch &&) = default;
- /** Destructor */
- ~NEPoolingAssemblyDispatch();
-
- /** If supported create an assembly routine, else fallback to Compute Library function.
- *
- * @param[in] input Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[out] output Output tensor to store the result of pooling. Data types supported: same as @p input.
- * @param[in] info Pooling meta-data
- */
- void configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info);
-
- /** Indicates whether or not this function can be used to process the given parameters.
- *
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] output Output tensor to store the result of pooling. Data types supported: same as @p input.
- * @param[in] info Pooling meta-data
- *
- * @return a status.
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);
- /** Was the function successfully configured ?
- *
- * @return True if the function is configured and ready to run
- */
- bool is_configured() const;
- // Run method overriden
- void run(ITensorPack &tensors) override;
-
-private:
- bool _is_global_pooling_layer{ false };
-};
-} // namespace experimental
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H */
diff --git a/src/runtime/NEON/functions/NEPoolingLayer.cpp b/src/runtime/NEON/functions/NEPoolingLayer.cpp
index 0c857b54dc..dd7a3a337e 100644
--- a/src/runtime/NEON/functions/NEPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEPoolingLayer.cpp
@@ -23,103 +23,48 @@
*/
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "src/core/NEON/kernels/NEPoolingLayerKernel.h"
-#include "src/runtime/NEON/functions/NEPoolingAssemblyDispatch.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuPooling.h"
namespace arm_compute
{
+struct NEPoolingLayer::Impl
+{
+ ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ ITensor *indices{ nullptr };
+ std::shared_ptr<IMemoryManager> memory_manager{ nullptr };
+ std::unique_ptr<cpu::CpuPooling> op{ nullptr };
+};
+
NEPoolingLayer::~NEPoolingLayer() = default;
NEPoolingLayer::NEPoolingLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_manager(std::move(memory_manager)), _pooling_layer_kernel(), _border_handler(), _asm_glue(), _is_global_pooling_layer(false), _data_layout(DataLayout::NCHW)
+ : _impl(std::make_unique<Impl>())
{
+ _impl->memory_manager = std::move(memory_manager);
}
void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info, ITensor *indices)
{
- // Check if we can run assembly kernels. Currently, indices are not supported by those kernels
- const bool run_optimised = bool(NEPoolingAssemblyDispatch::validate(input->info(), output->info(), pool_info)) && (indices == nullptr);
-
- if(run_optimised)
- {
- _asm_glue = std::make_unique<NEPoolingAssemblyDispatch>(_memory_manager);
- _asm_glue->configure(input, output, pool_info);
- ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());
- }
- else
- {
- // Check if we have Global Pooling Layer
- _is_global_pooling_layer = (input->info()->dimension(0) == pool_info.pool_size.width) && (input->info()->dimension(1) == pool_info.pool_size.height);
-
- // Get data layout
- _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout;
-
- // Configure pooling kernel
- _pooling_layer_kernel = std::make_unique<NEPoolingLayerKernel>();
- _pooling_layer_kernel->configure(input, output, pool_info, indices);
-
- switch(_data_layout)
- {
- case DataLayout::NCHW:
- {
- // Configure border depending on operation required (quantize border in case of asymmetric data_type)
- BorderMode border_mode = (!indices && pool_info.pool_type == PoolingType::MAX) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
- PixelValue zero_value((indices) ? std::numeric_limits<int>::min() : 0.f);
- if(is_data_type_quantized_asymmetric(input->info()->data_type()) && !pool_info.exclude_padding)
- {
- zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
- }
- _border_handler = std::make_unique<NEFillBorderKernel>();
- _border_handler->configure(input, _pooling_layer_kernel->border_size(), border_mode, zero_value);
- break;
- }
- case DataLayout::NHWC:
- break;
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
- }
+ _impl->src = input;
+ _impl->dst = output;
+ _impl->indices = indices;
+ _impl->op = std::make_unique<cpu::CpuPooling>(_impl->memory_manager);
+ _impl->op->configure(input->info(), output->info(), pool_info, (indices) ? indices->info() : nullptr);
}
Status NEPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
{
- const bool run_optimised = bool(NEPoolingAssemblyDispatch::validate(input, output, pool_info)) && (indices == nullptr);
-
- if(run_optimised)
- {
- return Status{};
- }
-
- return NEPoolingLayerKernel::validate(input, output, pool_info, indices);
+ return cpu::CpuPooling::validate(input, output, pool_info, indices);
}
void NEPoolingLayer::run()
{
- if(_asm_glue && _asm_glue->is_configured())
- {
- _asm_glue->run();
- }
- else
- {
- switch(_data_layout)
- {
- case DataLayout::NCHW:
- // Fill border
- NEScheduler::get().schedule(_border_handler.get(), Window::DimY);
-
- // Run pooling layer
- NEScheduler::get().schedule(_pooling_layer_kernel.get(), _is_global_pooling_layer ? Window::DimZ : Window::DimY);
- break;
- case DataLayout::NHWC:
- // Run pooling layer
- NEScheduler::get().schedule(_pooling_layer_kernel.get(), Window::DimX);
- break;
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
- }
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_DST_0, _impl->dst);
+ pack.add_tensor(TensorType::ACL_DST_1, _impl->indices);
+ _impl->op->run(pack);
}
} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuPooling.cpp b/src/runtime/cpu/operators/CpuPooling.cpp
new file mode 100644
index 0000000000..0b9b38d079
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPooling.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuPooling.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/NEON/kernels/NEFillBorderKernel.h"
+#include "src/core/cpu/kernels/CpuPoolingKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuPooling::CpuPooling(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_manager(std::move(memory_manager)), _pooling_layer_kernel(), _border_handler(), _asm_glue(), _is_global_pooling_layer(false), _data_layout(DataLayout::NCHW)
+{
+}
+
+CpuPooling::~CpuPooling() = default;
+
+void CpuPooling::configure(ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
+{
+ // Check if we can run assembly kernels. Currently, indices are not supported by those kernels
+ const bool run_optimised = bool(CpuPoolingAssemblyDispatch::validate(input, output, pool_info)) && (indices == nullptr);
+
+ if(run_optimised)
+ {
+ _asm_glue = std::make_unique<CpuPoolingAssemblyDispatch>(_memory_manager);
+ _asm_glue->configure(input, output, pool_info);
+ ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());
+ }
+ else
+ {
+ // Check if we have Global Pooling Layer
+ _is_global_pooling_layer = (input->dimension(0) == pool_info.pool_size.width) && (input->dimension(1) == pool_info.pool_size.height);
+
+ // Get data layout
+ _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->data_layout() : pool_info.data_layout;
+
+ // Configure pooling kernel
+ auto k = std::make_unique<kernels::CpuPoolingKernel>();
+ k->configure(input, output, pool_info, indices);
+ _pooling_layer_kernel = std::move(k);
+
+ switch(_data_layout)
+ {
+ case DataLayout::NCHW:
+ {
+ // Configure border depending on operation required (quantize border in case of asymmetric data_type)
+ BorderMode border_mode = (!indices && pool_info.pool_type == PoolingType::MAX) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
+ PixelValue zero_value((indices) ? std::numeric_limits<int>::min() : 0.f);
+ if(is_data_type_quantized_asymmetric(input->data_type()) && !pool_info.exclude_padding)
+ {
+ zero_value = PixelValue(0, input->data_type(), input->quantization_info());
+ }
+ auto b = std::make_unique<NEFillBorderKernel>();
+ b->configure(input, _pooling_layer_kernel->border_size(), border_mode, zero_value);
+ _border_handler = std::move(b);
+ break;
+ }
+ case DataLayout::NHWC:
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data layout not supported");
+ }
+ }
+}
+
+Status CpuPooling::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
+{
+ const bool run_optimised = bool(CpuPoolingAssemblyDispatch::validate(input, output, pool_info)) && (indices == nullptr);
+
+ if(run_optimised)
+ {
+ return Status{};
+ }
+
+ return kernels::CpuPoolingKernel::validate(input, output, pool_info, indices);
+}
+
+void CpuPooling::run(ITensorPack &tensors)
+{
+ if(_asm_glue && _asm_glue->is_configured())
+ {
+ _asm_glue->run(tensors);
+ }
+ else
+ {
+ switch(_data_layout)
+ {
+ case DataLayout::NCHW:
+ // Fill border
+ NEScheduler::get().schedule_op(_border_handler.get(), Window::DimY, _border_handler->window(), tensors);
+
+ // Run pooling layer
+ NEScheduler::get().schedule_op(_pooling_layer_kernel.get(), _is_global_pooling_layer ? Window::DimZ : Window::DimY, _pooling_layer_kernel->window(), tensors);
+ break;
+ case DataLayout::NHWC:
+ // Run pooling layer
+ NEScheduler::get().schedule_op(_pooling_layer_kernel.get(), Window::DimX, _pooling_layer_kernel->window(), tensors);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data layout not supported");
+ }
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuPooling.h b/src/runtime/cpu/operators/CpuPooling.h
new file mode 100644
index 0000000000..aa607b4b44
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPooling.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_POOLING_H
+#define ARM_COMPUTE_CPU_POOLING_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+// Forward Declarations
+struct PoolingLayerInfo;
+
+namespace cpu
+{
+// Forward Declarations
+class CpuPoolingAssemblyDispatch;
+/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following kernels and functions:
+ *
+ * -# @ref NEFillBorderKernel (executed if padding size is different from zero)
+ * -# @ref kernels::CpuPoolingKernel
+ * -# @ref CpuPoolingAssemblyDispatch
+ */
+class CpuPooling : public ICpuOperator
+{
+public:
+ /** Constructor */
+ CpuPooling(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CpuPooling(const CpuPooling &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CpuPooling &operator=(const CpuPooling &) = delete;
+ /** Prevent instances of this class from being moved (As this class contains non movable objects) */
+ CpuPooling(CpuPooling &&) = delete;
+ /** Prevent instances of this class from being moved (As this class contains non movable objects) */
+ CpuPooling &operator=(CpuPooling &&) = delete;
+ /** Default destructor */
+ ~CpuPooling();
+ /** Set the src and dst tensors.
+ *
+ * @note F16 is supported for pool sizes 2 and 3 only
+ *
+ * @param[in, out] src Source tensor info. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[out] dst Destination tensor info. Data types supported: same as @p src.
+ * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
+ * @param[out] indices (optional) The indices of the maximal values. Data type supported: U32.
+ */
+ void configure(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr);
+ /** Static function to check if given info will lead to a valid configuration of @ref CpuPooling
+ *
+ * @note F16 is supported for pool sizes 2 and 3 only
+ *
+ * @param[in] src Source tensor info. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] dst Destination tensor info. Data types supported: same as @p src.
+ * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
+ * @param[in] indices (optional) Tensor info of the indices of the maximal values. Data type supported: U32.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices = nullptr);
+
+ // Inherited methods overridden:
+ void run(ITensorPack &tensors) override;
+
+private:
+ std::shared_ptr<IMemoryManager> _memory_manager;
+
+ std::unique_ptr<INEKernel> _pooling_layer_kernel;
+ std::unique_ptr<INEKernel> _border_handler;
+ std::unique_ptr<CpuPoolingAssemblyDispatch> _asm_glue;
+
+ bool _is_global_pooling_layer;
+ DataLayout _data_layout;
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_POOLING_H */
diff --git a/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
new file mode 100644
index 0000000000..4a5623394f
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/cpu/kernels/CpuPoolingAssemblyWrapperKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuPoolingAssemblyDispatch::CpuPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)),
+ _workspace(),
+ _is_global_pooling_layer(false)
+{
+}
+
+CpuPoolingAssemblyDispatch::~CpuPoolingAssemblyDispatch() = default;
+
+void CpuPoolingAssemblyDispatch::configure(const ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info)
+{
+ const CPUInfo &ci = NEScheduler::get().cpu_info();
+ const unsigned int num_threads = NEScheduler::get().num_threads();
+
+ // If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
+ if(!CpuPoolingAssemblyDispatch::validate(src, dst, info))
+ {
+ return;
+ }
+
+ auto pooling_wrapper = std::make_unique<kernels::CpuPoolingAssemblyWrapperKernel>();
+ ARM_COMPUTE_ERROR_ON(pooling_wrapper == nullptr);
+ pooling_wrapper->configure(src, dst, info, ci);
+
+ // Check if we have Global Pooling Layer
+ _is_global_pooling_layer = (src->dimension(2) == info.pool_size.width) && (src->dimension(1) == info.pool_size.height);
+
+ // Allocate workspace based on kernel's memory requirements
+ constexpr size_t alignment = 4096;
+ const size_t workspace_size = pooling_wrapper->get_working_size(num_threads);
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+ _memory_group.manage(&_workspace);
+ _workspace.allocator()->allocate();
+
+ _kernel = std::move(pooling_wrapper);
+}
+
+Status CpuPoolingAssemblyDispatch::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info)
+{
+ return kernels::CpuPoolingAssemblyWrapperKernel::validate(src, dst, info);
+}
+
+bool CpuPoolingAssemblyDispatch::is_configured() const
+{
+ return _kernel != nullptr;
+}
+
+void CpuPoolingAssemblyDispatch::run(ITensorPack &tensors)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No srcs provided");
+
+ tensors.add_tensor(TensorType::ACL_DST_1, &_workspace);
+
+ if(_is_global_pooling_layer)
+ {
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimX, _kernel->window(), tensors);
+ }
+ else
+ {
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h
new file mode 100644
index 0000000000..353bbe1a78
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_POOLING_ASSEMBLY_DISPATCH_H
+#define ARM_COMPUTE_CPU_POOLING_ASSEMBLY_DISPATCH_H
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+class ITensor;
+
+/** Basic function to run pooling assembly kernels */
+class CpuPoolingAssemblyDispatch : public ICpuOperator
+{
+public:
+ /** Constructor */
+ CpuPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied */
+ CpuPoolingAssemblyDispatch(const CpuPoolingAssemblyDispatch &) = delete;
+ /** Default move constructor */
+ CpuPoolingAssemblyDispatch(CpuPoolingAssemblyDispatch &&) = default;
+ /** Prevent instances of this class from being copied */
+ CpuPoolingAssemblyDispatch &operator=(const CpuPoolingAssemblyDispatch &) = delete;
+ /** Default move assignment operator */
+ CpuPoolingAssemblyDispatch &operator=(CpuPoolingAssemblyDispatch &&) = default;
+ /** Destructor */
+ ~CpuPoolingAssemblyDispatch();
+
+ /** If supported create an assembly routine, else fallback to Compute Library function.
+ *
+ * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[out] dst Destination tensor info to store the result of pooling. Data types supported: same as @p src.
+ * @param[in] info Pooling meta-data
+ */
+ void configure(const ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info);
+
+ /** Indicates whether or not this function can be used to process the given parameters.
+ *
+ * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in]  dst  Destination tensor info to store the result of pooling. Data types supported: same as @p src.
+ * @param[in] info Pooling meta-data
+ *
+ * @return a status.
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info);
+ /** Was the function successfully configured ?
+ *
+ * @return True if the function is configured and ready to run
+ */
+ bool is_configured() const;
+    // Run method overridden
+ void run(ITensorPack &tensors) override;
+
+private:
+ arm_compute::MemoryGroup _memory_group;
+
+ arm_compute::Tensor _workspace;
+ bool _is_global_pooling_layer;
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_POOLING_ASSEMBLY_DISPATCH_H */