diff options
author | telsoa01 <telmo.soares@arm.com> | 2018-08-31 09:22:23 +0100 |
---|---|---|
committer | telsoa01 <telmo.soares@arm.com> | 2018-08-31 09:22:23 +0100 |
commit | c577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch) | |
tree | bd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnn/backends/test/LayerReleaseConstantDataTest.cpp | |
parent | 4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff) | |
download | armnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz |
Release 18.08
Diffstat (limited to 'src/armnn/backends/test/LayerReleaseConstantDataTest.cpp')
-rw-r--r-- | src/armnn/backends/test/LayerReleaseConstantDataTest.cpp | 212 |
1 file changed, 212 insertions, 0 deletions
diff --git a/src/armnn/backends/test/LayerReleaseConstantDataTest.cpp b/src/armnn/backends/test/LayerReleaseConstantDataTest.cpp new file mode 100644 index 0000000000..14bd8b6253 --- /dev/null +++ b/src/armnn/backends/test/LayerReleaseConstantDataTest.cpp @@ -0,0 +1,212 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include <boost/test/unit_test.hpp> +#include <boost/cast.hpp> + +#include "backends/WorkloadData.hpp" +#include "Graph.hpp" + +#include <utility> + +#include "backends/CpuTensorHandle.hpp" +#include "backends/ClWorkloadFactory.hpp" + +using namespace armnn; +using namespace std; + +// Connects 'from' to 'to' and sets the given tensor info on the connecting output slot. +void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0) +{ + from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex)); + from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +// The following tests are created specifically to test the ReleaseConstantData() method in the Layer. +// They build very simple graphs including the layer to be checked. +// Checks weights and biases before the method is called and after.
+///////////////////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest) + +BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest) +{ + Graph graph; + ClWorkloadFactory factory; + + // create the layer we're testing + BatchNormalizationDescriptor layerDesc; + layerDesc.m_Eps = 0.05f; + BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer"); + + armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32); + layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Mean->Allocate(); + layer->m_Variance->Allocate(); + layer->m_Beta->Allocate(); + layer->m_Gamma->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32); + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + + // check that the constants are not NULL before release + BOOST_CHECK(layer->m_Mean != nullptr); + BOOST_CHECK(layer->m_Variance != nullptr); + BOOST_CHECK(layer->m_Beta != nullptr); + BOOST_CHECK(layer->m_Gamma != nullptr); + + // free up the constants..
+ layer->ReleaseConstantData(); + + // check that the constants are NULL now + BOOST_CHECK(layer->m_Mean == nullptr); + BOOST_CHECK(layer->m_Variance == nullptr); + BOOST_CHECK(layer->m_Beta == nullptr); + BOOST_CHECK(layer->m_Gamma == nullptr); + + } + + + BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest) + { + Graph graph; + ClWorkloadFactory factory; + + // create the layer we're testing + Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 3; + layerDesc.m_PadRight = 3; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 4; + layerDesc.m_BiasEnabled = true; + + Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer"); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3}, + armnn::DataType::Float32)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle> + (TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32))); + + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32)); + Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32)); + + // check that the constants are not NULL before release + BOOST_CHECK(layer->m_Weight != nullptr); + BOOST_CHECK(layer->m_Bias != nullptr); + + // free up the constants..
+ layer->ReleaseConstantData(); + + // check that the constants are NULL now + BOOST_CHECK(layer->m_Weight == nullptr); + BOOST_CHECK(layer->m_Bias == nullptr); +} + +BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest) +{ + Graph graph; + ClWorkloadFactory factory; + + // create the layer we're testing + DepthwiseConvolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 3; + layerDesc.m_PadRight = 3; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 4; + layerDesc.m_BiasEnabled = true; + + DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer"); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32)); + Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32)); + + // check that the constants are not NULL before release + BOOST_CHECK(layer->m_Weight != nullptr); + BOOST_CHECK(layer->m_Bias != nullptr); + + // free up the constants..
+ layer->ReleaseConstantData(); + + // check that the constants are NULL now + BOOST_CHECK(layer->m_Weight == nullptr); + BOOST_CHECK(layer->m_Bias == nullptr); +} + +BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest) +{ + Graph graph; + ClWorkloadFactory factory; + + // create the layer we're testing + FullyConnectedDescriptor layerDesc; + layerDesc.m_BiasEnabled = true; + layerDesc.m_TransposeWeightMatrix = true; + + FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer"); + + float inputsQScale = 1.0f; + float outputQScale = 2.0f; + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, + DataType::QuantisedAsymm8, inputsQScale, 0)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, + GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale)); + Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale)); + + // check that the constants are not NULL before release + BOOST_CHECK(layer->m_Weight != nullptr); + BOOST_CHECK(layer->m_Bias != nullptr); + + // free up the constants.. + layer->ReleaseConstantData(); + + // check that the constants are NULL now + BOOST_CHECK(layer->m_Weight == nullptr); + BOOST_CHECK(layer->m_Bias == nullptr); +} + +BOOST_AUTO_TEST_SUITE_END() + |