about · summary · refs · log · tree · commit · diff
path: root/src/armnn/test/optimizations
diff options
context:
space:
mode:
author	Finn Williams <Finn.Williams@arm.com>	2021-03-22 17:51:06 +0000
committer	finn.williams <finn.williams@arm.com>	2021-04-07 16:42:38 +0000
commit	4422ceca976a88aac49b21808a43e465bc87a35e (patch)
tree	d4f7f3d86394f74b679c907ad3f7fc7f4537933f /src/armnn/test/optimizations
parent	b70ec417989490a2a72c66ecd6c737df1c094f4c (diff)
download	armnn-4422ceca976a88aac49b21808a43e465bc87a35e.tar.gz
Fix graph copy memory spike
* Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>
* Change clone to share ConstTensor rather than copy
* Remove uses of non-const GetTensor() call
* Reduce scope of non-optimized network in ExeNet, so memory can be released after use

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Ibb2c7309d12411d21405bd6024c76bcdf5404545
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r--	src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp	4
-rw-r--r--	src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp	2
-rw-r--r--	src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp	2
-rw-r--r--	src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp	5
4 files changed, 7 insertions, 6 deletions
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index 5cb89daafd..bb8e674b56 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -57,7 +57,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
// Check whether data matches expected Bf16 data
- BFloat16* data = fc->m_Weight->GetTensor<BFloat16>();
+ const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
BOOST_CHECK(data[0] == BFloat16(0.0f));
BOOST_CHECK(data[1] == BFloat16(-1.0f));
BOOST_CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
@@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
- float* data = fc->m_Weight->GetTensor<float>();
+ const float* data = fc->m_Weight->GetConstTensor<float>();
BOOST_CHECK(data[0] == 0.0f);
BOOST_CHECK(data[1] == -1.0f);
BOOST_CHECK(data[2] == 3.796875f);
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 84f525f4bf..12df462456 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -50,7 +50,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether data matches expected fp16 data
- Half* data = fc->m_Weight->GetTensor<Half>();
+ const Half* data = fc->m_Weight->GetConstTensor<Half>();
BOOST_CHECK(data[0] == Half(1.0f));
BOOST_CHECK(data[1] == Half(2.0f));
BOOST_CHECK(data[2] == Half(3.0f));
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index f2dea53afd..7d7c6b2b0a 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -50,7 +50,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
- float* data = fc->m_Weight->GetTensor<float>();
+ const float* data = fc->m_Weight->GetConstTensor<float>();
BOOST_CHECK(1.0f == data[0]);
BOOST_CHECK(2.0f == data[1]);
BOOST_CHECK(3.0f == data[2]);
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index b35f983434..a65012eef4 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -101,7 +101,7 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
// Check whether data matches expected Bf16 data
- armnn::BFloat16* data = conv->m_Weight->GetTensor<armnn::BFloat16>();
+ const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
@@ -171,7 +171,7 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
// Check whether data matches expected Bf16 data
- armnn::BFloat16* data = fc->m_Weight->GetTensor<armnn::BFloat16>();
+ const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
@@ -182,4 +182,5 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
+
BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file