diff options
author | Mike Kelly <mike.kelly@arm.com> | 2022-11-25 13:55:24 +0000 |
---|---|---|
committer | mike.kelly <mike.kelly@arm.com> | 2022-12-12 15:58:21 +0000 |
commit | ec67a0f08e0f96a5aebf3cac65331c67f6649f5e (patch) | |
tree | 94146a1f43c74d89d83fd5da54688ae0fc19cf85 /src/armnnTestUtils/CreateWorkload.hpp | |
parent | 5383767a7a759c867235ab66bd71f88281e3bd06 (diff) | |
download | armnn-ec67a0f08e0f96a5aebf3cac65331c67f6649f5e.tar.gz |
IVGCVSW-7209 Remove deprecated code due to be removed in 23.02
* Removed weights and bias from Convolution, DepthwiseConv & FullyConnected
layers
* Removed the weight and bias ConstTensorHandles from the QueueDescriptors
* Updated Workloads to take tensors from WorkloadInfo rather than the
QueueDescriptors
* Removed unused RedirectMembersToConstantInputs optimization and tests.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I9ffcdc4a1c0dff725539dd69fc435b700bd98a56
Diffstat (limited to 'src/armnnTestUtils/CreateWorkload.hpp')
-rw-r--r-- | src/armnnTestUtils/CreateWorkload.hpp | 39 |
1 file changed, 1 insertion, 38 deletions
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp index 905b8fa50b..0846d21388 100644 --- a/src/armnnTestUtils/CreateWorkload.hpp +++ b/src/armnnTestUtils/CreateWorkload.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -495,10 +495,6 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; - // As optimization isn't run member variables need to be updated. - layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType)); - layer->m_Weight->Allocate(); - armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale); weightsTensorInfo.SetConstant(); @@ -562,12 +558,6 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3}; TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; - // As optimization isn't run member variables need to be updated. 
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType)); - layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType))); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale); weightsTensorInfo.SetConstant(); @@ -662,12 +652,6 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a TensorShape biasShape = TensorShape{ 32 }; TensorShape inputShape = TensorShape{ 1, 32, 149, 149 }; TensorShape outputShape = TensorShape{ 1, 32, 147, 147 }; - // As optimization isn't run member variables need to be updated. - layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType)); - layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo(biasShape, GetBiasDataType(DataType))); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale); weightsTensorInfo.SetConstant(); @@ -1132,12 +1116,6 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm armnn::TensorInfo biasTensorInfo(biasShape, GetBiasDataType(DataType), inputsQScale); biasTensorInfo.SetConstant(); - layer->m_Weight = std::make_unique<ScopedTensorHandle>(weightsTensorInfo); - layer->m_Bias = std::make_unique<ScopedTensorHandle>(biasTensorInfo); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - // Creates extra layers. 
Layer* const input = graph.AddLayer<InputLayer>(0, "input"); auto const weights = graph.AddLayer<ConstantLayer>("weights"); @@ -1170,8 +1148,6 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm CHECK(queueDescriptor.m_Inputs.size() == 3); CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Weight->GetTensorInfo() == weightsTensorInfo)); - CHECK((queueDescriptor.m_Bias->GetTensorInfo() == biasTensorInfo)); // Returns so we can do extra, backend-specific tests. return workload; @@ -1203,9 +1179,6 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer"); - // As optimization isn't run member variables need to be updated. - layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType)); // [ 1, H, W, I*M ] - layer->m_Weight->Allocate(); // Creates extra layers. Layer* const input = graph.AddLayer<InputLayer>(0, "input"); @@ -1252,10 +1225,6 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn:: float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; - // As optimization isn't run member variables need to be updated. - layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0)); - layer->m_Weight->Allocate(); - armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale); weightsTensorInfo.SetConstant(); @@ -1301,12 +1270,6 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; - // As optimization isn't run member variables need to be updated. 
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0)); - layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)); - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale); armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale); weightsTensorInfo.SetConstant(); |