author     Manuel Bottini <manuel.bottini@arm.com>    2021-06-09 16:37:32 +0100
committer  Manuel Bottini <manuel.bottini@arm.com>    2021-06-15 16:31:27 +0000
commit     94f799e8f6f605333d40472860fb472e8ba6d83d (patch)
tree       ce528244814463ed42dc86a84d54ea870c75d592 /src/runtime/CL/functions/CLSoftmaxLayer.cpp
parent     36dff9f81e3a95aea19fcc7246a4896930a14bc6 (diff)
download   ComputeLibrary-94f799e8f6f605333d40472860fb472e8ba6d83d.tar.gz
Fix incorrect memory handling in ported functions
Details of the functions:
- ClSoftmax
- CpuSoftmax
- CpuPool2d

Change-Id: Icd2c14d5df010c3b2301e2693ce6f414d7c61916
Resolves: COMPMID-4404
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5797
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
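For readers unfamiliar with the helper introduced by this patch: manage_workspace<CLTensor>() (from src/core/helpers/MemoryHelpers.h) walks the operator's MemoryRequirements, creates one auxiliary CLTensor per requested slot, registers each tensor with the function's MemoryGroup and with the ITensorPack that run() later hands to the operator, and returns the tensors as WorkspaceData<CLTensor> so their lifetime is tied to the function. The sketch below is a simplified reconstruction of that behaviour, based on the allocate_workspace() body removed here; the function name manage_workspace_sketch and its internals are illustrative assumptions, not the library implementation.

    #include <memory>
    #include <utility>
    #include <vector>

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/MemoryGroup.h"

    // Illustrative sketch only: what a manage_workspace-style helper roughly does for
    // ClSoftmax, mirroring the removed allocate_workspace() logic. Not the library code.
    using WorkspaceSketch = std::vector<std::pair<int, std::unique_ptr<arm_compute::CLTensor>>>;

    WorkspaceSketch manage_workspace_sketch(const arm_compute::experimental::MemoryRequirements &mem_reqs,
                                            arm_compute::MemoryGroup                            &mgroup,
                                            arm_compute::ITensorPack                            &run_pack)
    {
        WorkspaceSketch workspace{};
        for(const auto &req : mem_reqs)
        {
            // One U8 buffer per requested slot, sized by the operator's requirements.
            workspace.emplace_back(req.slot, std::make_unique<arm_compute::CLTensor>());
            auto *tensor = workspace.back().second.get();
            tensor->allocator()->init(arm_compute::TensorInfo(arm_compute::TensorShape(req.size), 1, arm_compute::DataType::U8));
            mgroup.manage(tensor);                 // lifetime tracked by the function's MemoryGroup
            run_pack.add_tensor(req.slot, tensor); // visible to the operator when run_pack is forwarded
        }
        // Allocate only after every tensor is managed, so the group can plan memory reuse.
        for(auto &ws : workspace)
        {
            ws.second->allocator()->allocate();
        }
        return workspace;
    }

Because the pack is built once at configure() time, run() only needs to acquire the MemoryGroup scope and forward _impl->run_pack, which is exactly what the new code in the diff below does.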
Diffstat (limited to 'src/runtime/CL/functions/CLSoftmaxLayer.cpp')
-rw-r--r--    src/runtime/CL/functions/CLSoftmaxLayer.cpp    44
1 file changed, 7 insertions, 37 deletions
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index fe45f65beb..de58bf1b02 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -29,6 +29,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "src/core/gpu/cl/kernels/ClSoftmaxKernel.h"
+#include "src/core/helpers/MemoryHelpers.h"
#include "src/runtime/gpu/cl/operators/ClPermute.h"
#include "src/runtime/gpu/cl/operators/ClSoftmax.h"
@@ -43,7 +44,8 @@ struct CLSoftmaxLayerGeneric<IS_LOG>::Impl
ICLTensor *dst{ nullptr };
std::unique_ptr<OperatorType> op{ nullptr };
MemoryGroup memory_group{};
- std::vector<std::pair<int, std::unique_ptr<CLTensor>>> workspace_tensors{};
+ ITensorPack run_pack{};
+ WorkspaceData<CLTensor> workspace_tensors{};
};
template <bool IS_LOG>
@@ -71,7 +73,9 @@ void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_co
SoftmaxKernelInfo softmax_info{ beta, IS_LOG, input->info()->data_type(), axis };
_impl->op->configure(compile_context, *input->info(), *output->info(), softmax_info);
- allocate_workspace();
+
+ _impl->run_pack = { { TensorType::ACL_SRC, _impl->src }, { TensorType::ACL_DST, _impl->dst } };
+ _impl->workspace_tensors = manage_workspace<CLTensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack);
}
template <bool IS_LOG>
@@ -82,46 +86,12 @@ Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const I
}
template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::allocate_workspace()
-{
- const auto memory_requirements = _impl->op->workspace();
- std::for_each(memory_requirements.begin(), memory_requirements.end(), [this](const experimental::MemoryInfo & memory_info)
- {
- auto tensor_info = TensorInfo{ TensorShape(memory_info.size), 1, DataType::U8 };
- _impl->workspace_tensors.emplace_back(memory_info.slot, std::make_unique<CLTensor>());
- auto tensor = _impl->workspace_tensors.back().second.get();
- ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
- tensor->allocator()->init(tensor_info);
- _impl->memory_group.manage(tensor);
- });
-
- std::for_each(_impl->workspace_tensors.begin(), _impl->workspace_tensors.end(), [](std::pair<int, std::unique_ptr<CLTensor>> &wt)
- {
- auto tensor = wt.second.get();
- tensor->allocator()->allocate();
- });
-}
-
-template <bool IS_LOG>
void CLSoftmaxLayerGeneric<IS_LOG>::run()
{
// Acquire all the temporaries
MemoryGroupResourceScope scope_mg(_impl->memory_group);
-
ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
-
- ITensorPack pack;
- pack.add_tensor(TensorType::ACL_SRC, _impl->src);
- pack.add_tensor(TensorType::ACL_DST, _impl->dst);
-
- std::for_each(_impl->workspace_tensors.begin(), _impl->workspace_tensors.end(), [&pack](std::pair<int, std::unique_ptr<CLTensor>> &wt)
- {
- auto tensor = wt.second.get();
- ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
- pack.add_tensor(wt.first, tensor);
- });
-
- _impl->op->run(pack);
+ _impl->op->run(_impl->run_pack);
}
template class CLSoftmaxLayerGeneric<false>;
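For completeness, a minimal caller-side sketch of the public function whose internals change here. It assumes a working OpenCL backend and uses the CLSoftmaxLayer alias of CLSoftmaxLayerGeneric<false> from the public header; the tensor shapes and the beta/axis values are arbitrary illustrations, not taken from the patch.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    // Minimal usage sketch (assumes an available OpenCL device); shapes are illustrative.
    int main()
    {
        using namespace arm_compute;

        CLScheduler::get().default_init();

        CLTensor src;
        CLTensor dst;
        src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));

        CLSoftmaxLayer softmax;
        // With this patch, configure() also prepares the run_pack and workspace tensors.
        softmax.configure(&src, &dst, /* beta */ 1.f, /* axis */ 0);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        softmax.run(); // run() now just acquires the MemoryGroup and forwards the prepared pack
        CLScheduler::get().sync();
        return 0;
    }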