about summary refs log tree commit diff
path: root/src/armnn/optimizations
diff options
context:
space:
mode:
author    Finn Williams <Finn.Williams@arm.com>  2021-03-22 17:51:06 +0000
committer finn.williams <finn.williams@arm.com>  2021-04-07 16:42:38 +0000
commit    4422ceca976a88aac49b21808a43e465bc87a35e (patch)
tree      d4f7f3d86394f74b679c907ad3f7fc7f4537933f /src/armnn/optimizations
parent    b70ec417989490a2a72c66ecd6c737df1c094f4c (diff)
download  armnn-4422ceca976a88aac49b21808a43e465bc87a35e.tar.gz
Fix graph copy memory spike
* Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>
* Change clone to share ConstTensor rather than copy
* Remove uses of non-const GetTensor() call
* Reduce scope of non-optimized network in ExeNet, so memory can be released after use

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Ibb2c7309d12411d21405bd6024c76bcdf5404545
Diffstat (limited to 'src/armnn/optimizations')
-rw-r--r--  src/armnn/optimizations/AddBroadcastReshapeLayer.hpp   |  2
-rw-r--r--  src/armnn/optimizations/ConvertConstants.hpp           | 16
-rw-r--r--  src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp   |  7
3 files changed, 13 insertions, 12 deletions
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index 26661cfcde..0a5ad9d152 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -74,7 +74,7 @@ public:
ConstantLayer& constantLayer = static_cast<ConstantLayer&>(parentLayer);
constantLayer.m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(
- ConstTensor(reshapeInfo,constantLayer.m_LayerOutput.get()->GetTensor<void>()));
+ ConstTensor(reshapeInfo,constantLayer.m_LayerOutput.get()->GetConstTensor<void>()));
constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo);
return;
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index f3ebcdf5d9..df5a5b4f67 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -23,7 +23,7 @@ namespace optimizations
struct BFloat16ToFloat32
{
- static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -31,7 +31,7 @@ struct BFloat16ToFloat32
{
std::vector<float> newValues(info.GetNumElements());
- armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetTensor<BFloat16>(),
+ armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetConstTensor<BFloat16>(),
info.GetNumElements(),
newValues.data());
@@ -44,7 +44,7 @@ struct BFloat16ToFloat32
struct Float16ToFloat32
{
- static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -52,7 +52,7 @@ struct Float16ToFloat32
{
std::vector<float> newValues(info.GetNumElements());
- armnnUtils::FloatingPointConverter::ConvertFloat16To32(handle->GetTensor<Half>(),
+ armnnUtils::FloatingPointConverter::ConvertFloat16To32(handle->GetConstTensor<Half>(),
info.GetNumElements(),
newValues.data());
@@ -65,7 +65,7 @@ struct Float16ToFloat32
struct Float32ToBFloat16
{
- static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -73,7 +73,7 @@ struct Float32ToBFloat16
{
std::vector<BFloat16> newValues(info.GetNumElements());
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetTensor<float>(),
+ armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetConstTensor<float>(),
info.GetNumElements(),
newValues.data());
@@ -86,7 +86,7 @@ struct Float32ToBFloat16
struct Float32ToFloat16
{
- static void Func(std::unique_ptr<ScopedCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -94,7 +94,7 @@ struct Float32ToFloat16
{
std::vector<Half> newValues(info.GetNumElements());
- armnnUtils::FloatingPointConverter::ConvertFloat32To16(handle->GetTensor<float>(),
+ armnnUtils::FloatingPointConverter::ConvertFloat32To16(handle->GetConstTensor<float>(),
info.GetNumElements(),
newValues.data());
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
index c45ab2cded..a0856a485b 100644
--- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
+++ b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
@@ -27,9 +27,10 @@ inline LayerT* ConvertWeight(Layer* l)
{
std::vector<BFloat16> newValues(info.GetNumElements());
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(layer->m_Weight->template GetTensor<float>(),
- info.GetNumElements(),
- newValues.data());
+ armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(
+ layer->m_Weight->template GetConstTensor<float>(),
+ info.GetNumElements(),
+ newValues.data());
TensorInfo newInfo(info);
newInfo.SetDataType(DataType::BFloat16);