diff options
author    Finn Williams <Finn.Williams@arm.com>    2021-03-22 17:51:06 +0000
committer finn.williams <finn.williams@arm.com>    2021-04-07 16:42:38 +0000
commit    4422ceca976a88aac49b21808a43e465bc87a35e (patch)
tree      d4f7f3d86394f74b679c907ad3f7fc7f4537933f /src/armnn/optimizations/ConvertConstants.hpp
parent    b70ec417989490a2a72c66ecd6c737df1c094f4c (diff)
download  armnn-4422ceca976a88aac49b21808a43e465bc87a35e.tar.gz
Fix graph copy memory spike
* Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>
* Change clone to share ConstTensor rather than copy
* Remove uses of non-const GetTensor() call
* Reduce scope of non-optimized network in ExeNet, so memory can be released after use
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Ibb2c7309d12411d21405bd6024c76bcdf5404545
Diffstat (limited to 'src/armnn/optimizations/ConvertConstants.hpp')
 src/armnn/optimizations/ConvertConstants.hpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index f3ebcdf5d9..df5a5b4f67 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -23,7 +23,7 @@ namespace optimizations

 struct BFloat16ToFloat32
 {
-    static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+    static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
     {
         const TensorInfo& info = handle->GetTensorInfo();

@@ -31,7 +31,7 @@ struct BFloat16ToFloat32
         {
             std::vector<float> newValues(info.GetNumElements());

-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetTensor<BFloat16>(),
+            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetConstTensor<BFloat16>(),
                                                                          info.GetNumElements(),
                                                                          newValues.data());

@@ -44,7 +44,7 @@ struct BFloat16ToFloat32

 struct Float16ToFloat32
 {
-    static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+    static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
     {
         const TensorInfo& info = handle->GetTensorInfo();

@@ -52,7 +52,7 @@ struct Float16ToFloat32
         {
             std::vector<float> newValues(info.GetNumElements());

-            armnnUtils::FloatingPointConverter::ConvertFloat16To32(handle->GetTensor<Half>(),
+            armnnUtils::FloatingPointConverter::ConvertFloat16To32(handle->GetConstTensor<Half>(),
                                                                    info.GetNumElements(),
                                                                    newValues.data());

@@ -65,7 +65,7 @@ struct Float16ToFloat32

 struct Float32ToBFloat16
 {
-    static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+    static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
     {
         const TensorInfo& info = handle->GetTensorInfo();

@@ -73,7 +73,7 @@ struct Float32ToBFloat16
         {
             std::vector<BFloat16> newValues(info.GetNumElements());

-            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetTensor<float>(),
+            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetConstTensor<float>(),
                                                                          info.GetNumElements(),
                                                                          newValues.data());

@@ -86,7 +86,7 @@ struct Float32ToBFloat16

 struct Float32ToFloat16
 {
-    static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+    static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
     {
         const TensorInfo& info = handle->GetTensorInfo();

@@ -94,7 +94,7 @@ struct Float32ToFloat16
         {
             std::vector<Half> newValues(info.GetNumElements());

-            armnnUtils::FloatingPointConverter::ConvertFloat32To16(handle->GetTensor<float>(),
+            armnnUtils::FloatingPointConverter::ConvertFloat32To16(handle->GetConstTensor<float>(),
                                                                    info.GetNumElements(),
                                                                    newValues.data());