about · summary · refs · log · tree · commit · diff
path: root/src/armnn/test/optimizations
diff options
context:
space:
mode:
authorJames Conroy <james.conroy@arm.com>2021-04-27 17:13:27 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2021-05-06 14:40:40 +0000
commit1f58f03d82c482626b1b4673b6c0e25da4338fb5 (patch)
treee92451e00d459a2fc0d870694460f482aa4c77ae /src/armnn/test/optimizations
parenta7a12f5c3654da554ad6197beff0f0fc54681c92 (diff)
downloadarmnn-1f58f03d82c482626b1b4673b6c0e25da4338fb5.tar.gz
IVGCVSW-5815 Generalise ConstCpuTensorHandle
* Generalises ConstCpuTensorHandle and inherited classes by removing 'Cpu' from aliases.
* New renamed classes: ConstTensorHandle, TensorHandle, ScopedTensorHandle, PassthroughTensorHandle, ConstPassthroughTensorHandle.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r--src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp4
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp4
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp2
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp2
-rw-r--r--src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp8
5 files changed, 10 insertions, 10 deletions
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 4523e70437..d0d728bfab 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -299,7 +299,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
uint8_t tensor[] = { 1, 1, 1, 1, 1 };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(info1, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(info1, &tensor));
input->GetOutputSlot().SetTensorInfo(info0);
constant->GetOutputSlot().SetTensorInfo(info1);
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
constant->GetOutputSlot().SetTensorInfo(constantTermInfo);
float tensor[] = { 2.0f };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(constantTermInfo, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(constantTermInfo, &tensor));
add1->GetOutputSlot().SetTensorInfo(outputInfo);
input->GetOutputSlot().Connect(add1->GetInputSlot(0));
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index bb8e674b56..e4c1f2f413 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 12df462456..1dfe7f431c 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 7d7c6b2b0a..1ddf5262e8 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index a65012eef4..f93fa77b0d 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -72,8 +72,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
armnn::Convolution2dDescriptor descriptor;
auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ conv->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ conv->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
conv->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -142,8 +142,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
armnn::FullyConnectedDescriptor descriptor;
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ fc->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
fc->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");