author     Mike Kelly <mike.kelly@arm.com>    2022-11-25 13:55:24 +0000
committer  mike.kelly <mike.kelly@arm.com>    2022-12-12 15:58:21 +0000
commit     ec67a0f08e0f96a5aebf3cac65331c67f6649f5e (patch)
tree       94146a1f43c74d89d83fd5da54688ae0fc19cf85 /src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
parent     5383767a7a759c867235ab66bd71f88281e3bd06 (diff)
download   armnn-ec67a0f08e0f96a5aebf3cac65331c67f6649f5e.tar.gz
IVGCVSW-7209 Remove deprecated code due to be removed in 23.02
* Removed weights and bias from Convolution, DepthwiseConv & FullyConnected
layers
* Removed the weight and bias ConstTensorHandles from the QueueDescriptors
* Updated Workloads to take tensors from WorkloadInfo rather than the
QueueDescriptors
* Removed unused RedirectMembersToConstantInputs optimization and tests.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I9ffcdc4a1c0dff725539dd69fc435b700bd98a56
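
The shape of the change is visible in the test diff below: weights and bias are no longer member handles on the layer (layer->m_Weight / layer->m_Bias) but ordinary ConstantLayers connected to the layer's input slots 1 and 2. The following is a minimal sketch of the new wiring, mirroring the updated test below; graph, layerDesc, and the tensor shapes are taken from that test:

    // Weights and bias are supplied by ConstantLayers instead of member handles.
    auto* const convolutionLayer = graph.AddLayer<Convolution2dLayer>(layerDesc, "convolution");
    auto* const weightsLayer     = graph.AddLayer<ConstantLayer>("weights");
    auto* const biasLayer        = graph.AddLayer<ConstantLayer>("bias");

    // TensorInfos are now created with the constant flag (final argument) set up front,
    // rather than calling SetConstant() afterwards.
    TensorInfo weightsInfo = TensorInfo({ 2, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);

    // Slot 0 carries the activations; slots 1 and 2 carry weights and bias
    // (the bias layer is wired the same way as the weights layer).
    weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));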
Diffstat (limited to 'src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp')
-rw-r--r--  src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp  126
1 file changed, 61 insertions(+), 65 deletions(-)
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index abfb621c93..b7b514573c 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -12,8 +12,6 @@
 #include <doctest/doctest.h>
 
-#include <utility>
-
 using namespace armnn;
 using namespace std;
 
@@ -84,29 +82,15 @@ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
     layerDesc.m_StrideY = 4;
     layerDesc.m_BiasEnabled = true;
 
-    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
-    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
-                                                                      armnn::DataType::Float32));
-    layer->m_Bias = std::make_unique<ScopedTensorHandle>
-        (TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32)));
-
-    layer->m_Weight->Allocate();
-    layer->m_Bias->Allocate();
-
-    ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
-    ConstantLayer* biasLayer = graph.AddLayer<ConstantLayer>("Bias");
+    auto* const convolutionLayer = graph.AddLayer<Convolution2dLayer>(layerDesc, "convolution");
+    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+    auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
 
-    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
-                                                                                  armnn::DataType::Float32));
+    TensorInfo weightsInfo = TensorInfo({ 2, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
+    TensorInfo biasInfo = TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32), 1.0, 0.0, true);
 
-    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(
-        TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
-
-    TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
-    weightsInfo.SetConstant();
-    TensorInfo biasInfo = biasLayer->m_LayerOutput->GetTensorInfo();
-    biasInfo.SetConstant();
+    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
 
     weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
     biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
@@ -116,21 +100,21 @@ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // connect up
-    Connect(input, layer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
-    weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
-    biasLayer->GetOutputSlot().Connect(layer->GetInputSlot(2));
-    Connect(layer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
+    Connect(input, convolutionLayer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
+    weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
+    biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
+    Connect(convolutionLayer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
 
     // check the constants that they are not NULL
     CHECK(weightsLayer->m_LayerOutput != nullptr);
     CHECK(biasLayer->m_LayerOutput != nullptr);
 
-    // free up the constants..
-    layer->ReleaseConstantData();
+    // free up the constants.
+    convolutionLayer->ReleaseConstantData();
 
-    // check the constants that they are NULL now
-    CHECK(weightsLayer->m_LayerOutput == nullptr);
-    CHECK(biasLayer->m_LayerOutput == nullptr);
+    // check the constants that they are still not NULL
+    CHECK(weightsLayer->m_LayerOutput != nullptr);
+    CHECK(biasLayer->m_LayerOutput != nullptr);
 }
 
 TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
@@ -147,33 +131,39 @@ TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
     layerDesc.m_StrideY = 4;
     layerDesc.m_BiasEnabled = true;
 
-    DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
+    auto* const convolutionLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "convolution");
+    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+    auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
+
+    TensorInfo weightsInfo = TensorInfo({ 3, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
+    TensorInfo biasInfo = TensorInfo({ 9 }, GetBiasDataType(armnn::DataType::Float32), 1.0, 0.0, true);
+
+    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
 
-    layer->m_Weight = std::make_unique<ScopedTensorHandle>(
-        TensorInfo({3, 3, 5, 3}, DataType::Float32));
-    layer->m_Bias = std::make_unique<ScopedTensorHandle>(
-        TensorInfo({9}, DataType::Float32));
-    layer->m_Weight->Allocate();
-    layer->m_Bias->Allocate();
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
 
     // create extra layers
     Layer* const input = graph.AddLayer<InputLayer>(0, "input");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // connect up
-    Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
-    Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
+    Connect(input, convolutionLayer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
+    weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
+    biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
+    Connect(convolutionLayer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
 
     // check the constants that they are not NULL
-    CHECK(layer->m_Weight != nullptr);
-    CHECK(layer->m_Bias != nullptr);
+    CHECK(weightsLayer->m_LayerOutput != nullptr);
+    CHECK(biasLayer->m_LayerOutput != nullptr);
 
-    // free up the constants..
-    layer->ReleaseConstantData();
+    // free up the constants.
+    convolutionLayer->ReleaseConstantData();
 
-    // check the constants that they are NULL now
-    CHECK(layer->m_Weight == nullptr);
-    CHECK(layer->m_Bias == nullptr);
+    // check the constants that they are still not NULL
+    CHECK(weightsLayer->m_LayerOutput != nullptr);
+    CHECK(biasLayer->m_LayerOutput != nullptr);
 }
 
 TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
@@ -185,36 +175,42 @@
     layerDesc.m_BiasEnabled = true;
     layerDesc.m_TransposeWeightMatrix = true;
 
-    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+    auto* const fullyConnectedLayer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+    auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
 
     float inputsQScale = 1.0f;
     float outputQScale = 2.0f;
 
-    layer->m_Weight = std::make_unique<ScopedTensorHandle>(
-        TensorInfo({7, 20}, DataType::QAsymmU8, inputsQScale, 0));
-    layer->m_Bias = std::make_unique<ScopedTensorHandle>(
-        TensorInfo({7}, GetBiasDataType(DataType::QAsymmU8), inputsQScale));
-    layer->m_Weight->Allocate();
-    layer->m_Bias->Allocate();
+    TensorInfo weightsInfo = TensorInfo({ 7, 20 }, DataType::QAsymmU8, inputsQScale, 0.0, true);
+    TensorInfo biasInfo = TensorInfo({ 7 }, GetBiasDataType(DataType::QAsymmU8), inputsQScale, 0.0, true);
+
+    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
+
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
 
     // create extra layers
     Layer* const input = graph.AddLayer<InputLayer>(0, "input");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // connect up
-    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
-    Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
+    Connect(input, fullyConnectedLayer, TensorInfo({ 3, 1, 4, 5 }, DataType::QAsymmU8, inputsQScale));
+    weightsLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(1));
+    biasLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(2));
+    Connect(fullyConnectedLayer, output, TensorInfo({ 3, 7 }, DataType::QAsymmU8, outputQScale));
 
     // check the constants that they are not NULL
-    CHECK(layer->m_Weight != nullptr);
-    CHECK(layer->m_Bias != nullptr);
+    CHECK(weightsLayer->m_LayerOutput != nullptr);
+    CHECK(biasLayer->m_LayerOutput != nullptr);
 
-    // free up the constants..
-    layer->ReleaseConstantData();
+    // free up the constants.
+    fullyConnectedLayer->ReleaseConstantData();
 
-    // check the constants that they are NULL now
-    CHECK(layer->m_Weight == nullptr);
-    CHECK(layer->m_Bias == nullptr);
+    // check the constants that they are still not NULL
+    CHECK(weightsLayer->m_LayerOutput != nullptr);
+    CHECK(biasLayer->m_LayerOutput != nullptr);
 }
 
 }
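
Note the flipped assertions at the end of each test: after this patch, ReleaseConstantData() on the convolution or fully connected layer leaves the ConstantLayers' m_LayerOutput handles intact, because the constant data is now owned by those ConstantLayers rather than by the consuming layer.

The diff above covers only the test-side wiring; the workload-side change from the commit message ("Updated Workloads to take tensors from WorkloadInfo rather than the QueueDescriptors") is not part of this file. As a rough, hypothetical sketch of that pattern (not code from this patch), a workload that previously read a dedicated descriptor.m_Weight handle would now find the weight TensorInfo among its ordinary inputs:

    // Hypothetical illustration only: with weights connected to input slot 1
    // as in the tests above, a workload reads their TensorInfo from the
    // WorkloadInfo ('info') instead of a m_Weight member on the descriptor.
    const armnn::TensorInfo& weightsInfo = info.m_InputTensorInfos[1];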