From 1f58f03d82c482626b1b4673b6c0e25da4338fb5 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Tue, 27 Apr 2021 17:13:27 +0100
Subject: IVGCVSW-5815 Generalise ConstCpuTensorHandle

* Generalises ConstCpuTensorHandle and inherited classes by
  removing 'Cpu' from aliases.
* New renamed classes: ConstTensorHandle, TensorHandle,
  ScopedTensorHandle, PassthroughTensorHandle,
  ConstPassthroughTensorHandle.

Signed-off-by: James Conroy
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
---
 src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp    | 4 ++--
 src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp      | 4 ++--
 src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp | 2 +-
 src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp | 2 +-
 src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp  | 8 ++++----
 5 files changed, 10 insertions(+), 10 deletions(-)

(limited to 'src/armnn/test/optimizations')

diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 4523e70437..d0d728bfab 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -299,7 +299,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)

     uint8_t tensor[] = { 1, 1, 1, 1, 1 };

-    constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(info1, &tensor));
+    constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(info1, &tensor));

     input->GetOutputSlot().SetTensorInfo(info0);
     constant->GetOutputSlot().SetTensorInfo(info1);
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
     input->GetOutputSlot().SetTensorInfo(inputInfo);
     constant->GetOutputSlot().SetTensorInfo(constantTermInfo);
     float tensor[] = { 2.0f };
-    constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(constantTermInfo, &tensor));
+    constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(constantTermInfo, &tensor));
     add1->GetOutputSlot().SetTensorInfo(outputInfo);

     input->GetOutputSlot().Connect(add1->GetInputSlot(0));
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index bb8e674b56..e4c1f2f413 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
     input->GetOutputSlot().SetTensorInfo(info);

     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
     fc->GetOutputSlot().SetTensorInfo(info);

     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
     input->GetOutputSlot().SetTensorInfo(info);

     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
     fc->GetOutputSlot().SetTensorInfo(info);

     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 12df462456..1dfe7f431c 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
     input->GetOutputSlot().SetTensorInfo(info);

     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
     fc->GetOutputSlot().SetTensorInfo(info);

     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 7d7c6b2b0a..1ddf5262e8 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
     input->GetOutputSlot().SetTensorInfo(info);

     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
     fc->GetOutputSlot().SetTensorInfo(info);

     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index a65012eef4..f93fa77b0d 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -72,8 +72,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
     armnn::Convolution2dDescriptor descriptor;

     auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
-    conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
-    conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+    conv->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+    conv->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
     conv->GetOutputSlot().SetTensorInfo(infoFP32);

     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -142,8 +142,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
     armnn::FullyConnectedDescriptor descriptor;

     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
-    fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+    fc->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
     fc->GetOutputSlot().SetTensorInfo(infoFP32);

     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
--
cgit v1.2.1
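
Editor's note: every hunk above applies the same pattern; only the template argument of
std::make_unique changes, since the renamed handle classes keep the old interface. The
sketch below shows that usage in isolation. It is not part of the patch: the header path
and the helper name are assumptions made for illustration, modelled on the usual ArmNN
test setup.

    // Sketch only (not from the patch). Assumption: after this change the handle
    // classes are declared in backendsCommon/TensorHandle.hpp (formerly CpuTensorHandle.hpp).
    #include <armnn/Tensor.hpp>
    #include <backendsCommon/TensorHandle.hpp>

    #include <memory>
    #include <vector>

    // Hypothetical helper: wraps constant weights in the renamed ScopedTensorHandle,
    // the same way the updated tests do for FullyConnected/Convolution2d layers.
    std::unique_ptr<armnn::ScopedTensorHandle> MakeWeightsHandle()
    {
        unsigned int dims[] = { 4, 1, 1, 1 };
        std::vector<float> weightValues{ 1.0f, 2.0f, 3.0f, 4.0f };
        armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32),
                                   weightValues);

        // Before this patch the same line read:
        //     std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
        return std::make_unique<armnn::ScopedTensorHandle>(weights);
    }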