about summary refs log tree commit diff
path: root/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp')
-rw-r--r-- src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp | 21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 34e5f6d3b6..118907e703 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -1,12 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <TestUtils.hpp>
-#include <Optimizer.hpp>
#include <Half.hpp>
+#include <Optimizer.hpp>
#include <doctest/doctest.h>
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsFloatToHalfTest")
// Create const tensor from fp32 data
unsigned int dims[] = { 4, 1, 1, 1 };
std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
+ armnn::TensorInfo weightsInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::ConstTensor weights(weightsInfo, floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
+ auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+ weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);
+
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
// Connect up the layers
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether data matches expected fp16 data
- const Half* data = fc->m_Weight->GetConstTensor<Half>();
+ const Half* data = weightsLayer->m_LayerOutput->GetConstTensor<Half>();
CHECK(data[0] == Half(1.0f));
CHECK(data[1] == Half(2.0f));
CHECK(data[2] == Half(3.0f));
@@ -100,12 +105,14 @@ TEST_CASE("ConvertConstantsFloatToHalfTest_constant")
fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
+ CHECK(5 == graph.GetNumLayers());
CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
+ CHECK(5 == graph.GetNumLayers());
CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether weights data matches expected fp16 data