author     Mike Kelly <mike.kelly@arm.com>        2022-11-25 13:55:24 +0000
committer  mike.kelly <mike.kelly@arm.com>        2022-12-12 15:58:21 +0000
commit     ec67a0f08e0f96a5aebf3cac65331c67f6649f5e (patch)
tree       94146a1f43c74d89d83fd5da54688ae0fc19cf85 /src/backends/backendsCommon
parent     5383767a7a759c867235ab66bd71f88281e3bd06 (diff)
download   armnn-ec67a0f08e0f96a5aebf3cac65331c67f6649f5e.tar.gz
IVGCVSW-7209 Remove deprecated code due to be removed in 23.02
* Removed weights and bias from Convolution, DepthwiseConv & FullyConnected layers
* Removed the weight and bias ConstTensorHandles from the QueueDescriptors
* Updated Workloads to take tensors from WorkloadInfo rather than the QueueDescriptors
* Removed unused RedirectMembersToConstantInputs optimization and tests.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I9ffcdc4a1c0dff725539dd69fc435b700bd98a56
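In graph terms, the migration looks like this (a minimal sketch assembled from the test updates below; the layer names and tensor shapes are illustrative): weights and biases become ConstantLayers wired into input slots 1 and 2 of the consuming layer, replacing the old m_Weight/m_Bias members.

    // Sketch of the post-change pattern, drawn from LayerReleaseConstantDataTest.cpp.
    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = true;
    auto* const convLayer    = graph.AddLayer<Convolution2dLayer>(desc, "convolution");
    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
    auto* const biasLayer    = graph.AddLayer<ConstantLayer>("bias");

    // The trailing 'true' marks the TensorInfos as constant.
    TensorInfo weightsInfo({ 2, 3, 5, 3 }, DataType::Float32, 1.0f, 0, true);
    TensorInfo biasInfo({ 2 }, GetBiasDataType(DataType::Float32), 1.0f, 0, true);

    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
    biasLayer->m_LayerOutput    = std::make_shared<ScopedTensorHandle>(biasInfo);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);

    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1)); // weights = input 1
    biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));    // bias    = input 2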
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.hpp               |   4
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp          |  36
-rw-r--r--  src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp      | 126
-rw-r--r--  src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp         | 124
-rw-r--r--  src/backends/backendsCommon/test/WorkloadDataValidation.cpp            |  27
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp         | 111
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp |  11
7 files changed, 154 insertions, 285 deletions
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 0d98804954..6a673c6e93 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1469,8 +1469,6 @@ void CreateReferenceDynamicBackendTestImpl()
{ outputInfo }
};
convolution2dQueueDescriptor.m_Inputs.push_back(nullptr);
- auto weights = std::make_unique<ScopedTensorHandle>(weightInfo);
- convolution2dQueueDescriptor.m_Weight = weights.get();
// Create a convolution workload with the dummy settings
auto workload = referenceWorkloadFactory->CreateWorkload(LayerType::Convolution2d,
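With m_Weight gone from Convolution2dQueueDescriptor, a workload under test receives its weights as an ordinary input whose TensorInfo travels in the WorkloadInfo. A minimal sketch of the replacement wiring, assuming the AddInputToWorkload/AddOutputToWorkload helpers used elsewhere in these tests:

    // Weights are registered as input 1 of the workload; no ScopedTensorHandle
    // is attached to the descriptor any more.
    Convolution2dQueueDescriptor data;
    WorkloadInfo info;
    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, weightInfo, weightsHandle.get());
    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
    auto workload = referenceWorkloadFactory->CreateWorkload(LayerType::Convolution2d, data, info);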
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 18f11a542e..fb7a0271d4 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -242,10 +242,6 @@ struct DummyConvolutionLayer
desc.m_StrideX = 1;
desc.m_StrideY = 1;
m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
~DummyConvolutionLayer()
@@ -268,10 +264,28 @@ struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
{
};
-template<>
+// Note: When m_Weight and m_Bias are removed from TransposeConvolution, Transpose can use DummyConvolutionLayer
+template <>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
- : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
+ DummyLayer()
+ {
+ typename armnn::TransposeConvolution2dLayer::DescriptorType desc;
+ desc.m_StrideX = 1;
+ desc.m_StrideY = 1;
+ m_Layer = dummyGraph.AddLayer<armnn::TransposeConvolution2dLayer>(desc, "");
+ m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+ m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+ }
+
+ ~DummyLayer()
+ {
+ dummyGraph.EraseLayer(m_Layer);
+ }
+
+ armnn::TransposeConvolution2dLayer* m_Layer;
};
template<>
@@ -518,8 +532,6 @@ struct DummyLayer<armnn::FullyConnectedLayer>
{
armnn::FullyConnectedLayer::DescriptorType desc;
m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
~DummyLayer()
@@ -767,12 +779,6 @@ unsigned int GetNumOutputs(const armnn::Layer& layer)
return layer.GetNumOutputSlots();
}
-template<>
-unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
-{
- IgnoreUnused(layer);
- return 2;
-}
// Tests that the IsLayerSupported() function returns the correct value.
// We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index abfb621c93..b7b514573c 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -12,8 +12,6 @@
#include <doctest/doctest.h>
-#include <utility>
-
using namespace armnn;
using namespace std;
@@ -84,29 +82,15 @@ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
layerDesc.m_StrideY = 4;
layerDesc.m_BiasEnabled = true;
- Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
- armnn::DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
- ConstantLayer* biasLayer = graph.AddLayer<ConstantLayer>("Bias");
+ auto* const convolutionLayer = graph.AddLayer<Convolution2dLayer>(layerDesc, "convolution");
+ auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
- armnn::DataType::Float32));
+ TensorInfo weightsInfo = TensorInfo({ 2, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
+ TensorInfo biasInfo = TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32), 1.0, 0.0, true);
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(
- TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
-
- TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
- weightsInfo.SetConstant();
- TensorInfo biasInfo = biasLayer->m_LayerOutput->GetTensorInfo();
- biasInfo.SetConstant();
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+ biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
@@ -116,21 +100,21 @@ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
- weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
- biasLayer->GetOutputSlot().Connect(layer->GetInputSlot(2));
- Connect(layer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
+ Connect(input, convolutionLayer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
+ weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
+ biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
+ Connect(convolutionLayer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
// check the constants that they are not NULL
CHECK(weightsLayer->m_LayerOutput != nullptr);
CHECK(biasLayer->m_LayerOutput != nullptr);
- // free up the constants..
- layer->ReleaseConstantData();
+ // free up the constants.
+ convolutionLayer->ReleaseConstantData();
- // check the constants that they are NULL now
- CHECK(weightsLayer->m_LayerOutput == nullptr);
- CHECK(biasLayer->m_LayerOutput == nullptr);
+ // check the constants that they are still not NULL
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
}
TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
@@ -147,33 +131,39 @@ TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
layerDesc.m_StrideY = 4;
layerDesc.m_BiasEnabled = true;
- DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
+ auto* const convolutionLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "convolution");
+ auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
+
+ TensorInfo weightsInfo = TensorInfo({ 3, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
+ TensorInfo biasInfo = TensorInfo({ 9 }, GetBiasDataType(armnn::DataType::Float32), 1.0, 0.0, true);
+
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+ biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(
- TensorInfo({3, 3, 5, 3}, DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(
- TensorInfo({9}, DataType::Float32));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
// create extra layers
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
- Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
+ Connect(input, convolutionLayer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
+ weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
+ biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
+ Connect(convolutionLayer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
// check the constants that they are not NULL
- CHECK(layer->m_Weight != nullptr);
- CHECK(layer->m_Bias != nullptr);
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
- // free up the constants..
- layer->ReleaseConstantData();
+ // free up the constants.
+ convolutionLayer->ReleaseConstantData();
- // check the constants that they are NULL now
- CHECK(layer->m_Weight == nullptr);
- CHECK(layer->m_Bias == nullptr);
+ // check the constants that they are still not NULL
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
}
TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
@@ -185,36 +175,42 @@ TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
layerDesc.m_BiasEnabled = true;
layerDesc.m_TransposeWeightMatrix = true;
- FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+ auto* const fullyConnectedLayer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+ auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
float inputsQScale = 1.0f;
float outputQScale = 2.0f;
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(
- TensorInfo({7, 20}, DataType::QAsymmU8, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(
- TensorInfo({7}, GetBiasDataType(DataType::QAsymmU8), inputsQScale));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
+ TensorInfo weightsInfo = TensorInfo({ 7, 20 }, DataType::QAsymmU8, inputsQScale, 0.0, true);
+ TensorInfo biasInfo = TensorInfo({ 7 }, GetBiasDataType(DataType::QAsymmU8), inputsQScale, 0.0, true);
+
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+ biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
+
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
// create extra layers
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
- Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
+ Connect(input, fullyConnectedLayer, TensorInfo({ 3, 1, 4, 5 }, DataType::QAsymmU8, inputsQScale));
+ weightsLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(1));
+ biasLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(2));
+ Connect(fullyConnectedLayer, output, TensorInfo({ 3, 7 }, DataType::QAsymmU8, outputQScale));
// check the constants that they are not NULL
- CHECK(layer->m_Weight != nullptr);
- CHECK(layer->m_Bias != nullptr);
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
- // free up the constants..
- layer->ReleaseConstantData();
+ // free up the constants.
+ fullyConnectedLayer->ReleaseConstantData();
- // check the constants that they are NULL now
- CHECK(layer->m_Weight == nullptr);
- CHECK(layer->m_Bias == nullptr);
+ // check the constants that they are still not NULL
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
}
}
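The flipped assertions above follow directly from the ownership change: ReleaseConstantData() on the consuming layer no longer holds any weights or biases to free, so the ConstantLayers' outputs survive the call. On the workload side, "take tensors from WorkloadInfo" amounts to reading the constant inputs' TensorInfos from the info vectors; roughly (a sketch, not the exact workload code):

    // WorkloadInfo carries one TensorInfo per input: with weights at slot 1
    // and bias at slot 2, a workload recovers their shapes like this.
    const TensorInfo& weightsInfo = info.m_InputTensorInfos[1];
    const TensorInfo& biasInfo    = info.m_InputTensorInfos[2]; // only when m_BiasEnabled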
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 67354696b0..997fe9850d 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -94,13 +94,10 @@ Convolution2dLayer* AddConvolutionLayer(Graph& graph,
LayerNameToLayerMap& layersInGraph,
const Convolution2dDescriptor& convolutionDescriptor,
const std::string& layerName,
- const TensorInfo& weightInfo,
- const TensorInfo& biasInfo,
const TensorInfo& outputInfo)
{
Convolution2dLayer* const convLayer = graph.AddLayer<Convolution2dLayer>(convolutionDescriptor, layerName.c_str());
CHECK(convLayer);
- SetWeightAndBias(convLayer, weightInfo, biasInfo);
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
layersInGraph.insert(std::make_pair(convLayer->GetName(), convLayer));
return convLayer;
@@ -335,11 +332,11 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, Layer
// Construct the graph
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
Convolution2dLayer* const convLayer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv layer", weightInfo, biasInfo, outputInfo);
+ "conv layer", outputInfo);
ConstantLayer* const weightsLayer =
- AddConstantLayer(graph, layersInGraph, "Weights Layer", constWeightsTensor, outputInfo);
- ConstantLayer* const biasLayer = AddConstantLayer(graph, layersInGraph, "Bias Layer", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer", constWeightsTensor, weightInfo);
+ ConstantLayer* const biasLayer = AddConstantLayer(graph, layersInGraph, "Bias Layer", constBiasTensor, biasInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -373,7 +370,6 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, Layer
std::vector<float> biasVector(16);
ConstTensor constBiasTensor(biasInfo, biasVector);
-
Convolution2dDescriptor convolutionDescriptor;
convolutionDescriptor.m_StrideX = 1;
convolutionDescriptor.m_StrideY = 1;
@@ -383,40 +379,40 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, Layer
// Construct the graph
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
+ "conv1 layer", outputInfo);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv2 layer", weightInfo, biasInfo, outputInfo);
+ "conv2 layer", outputInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, biasInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv3 layer", weightInfo, biasInfo, outputInfo);
+ "conv3 layer", outputInfo);
ConstantLayer* const weightsLayer3 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer3 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, biasInfo);
Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv4 layer", weightInfo, biasInfo, outputInfo);
+ "conv4 layer", outputInfo);
ConstantLayer* const weightsLayer4 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 4", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 4", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer4 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 4", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 4", constBiasTensor, biasInfo);
Convolution2dLayer* const conv5Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv5 layer", weightInfo, biasInfo, outputInfo);
+ "conv5 layer", outputInfo);
ConstantLayer* const weightsLayer5 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer5 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, biasInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -504,26 +500,26 @@ SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, Laye
// Construct the graph
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
+ "conv1 layer", outputInfo);
Pooling2dLayer* const pooling1Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor,
"pooling1 layer", outputInfo);
Pooling2dLayer* const pooling2Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor,
"pooling2 layer", outputInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, biasInfo);
Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv2 layer", weightInfo, biasInfo, outputInfo);
+ "conv2 layer", outputInfo);
Pooling2dLayer* const pooling3Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor,
"pooling3 layer", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -581,14 +577,13 @@ SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, Lay
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
ConstantLayer* const weightsLayer =
- AddConstantLayer(graph, layersInGraph, "Weights Layer unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer =
- AddConstantLayer(graph, layersInGraph, "Bias Layer unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer unoptimizable", constBiasTensor, biasInfo);
Convolution2dLayer* const convLayer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv layer unoptimizable", weightInfo, biasInfo,
- outputInfo);
+ "conv layer unoptimizable", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
// Connect the network
@@ -631,46 +626,36 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, L
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer3 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer3 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer4 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 4 unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 4 unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer4 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 4 unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 4 unoptimizable", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer5 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer5 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, biasInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
- Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph,
- layersInGraph,
- convolutionDescriptor,
- "conv2 layer unoptimizable",
- weightInfo,
- biasInfo,
- outputInfo);
+ "conv1 layer", outputInfo);
+ Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
+ "conv2 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv3 layer", weightInfo, biasInfo, outputInfo);
- Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph,
- layersInGraph,
- convolutionDescriptor,
- "conv4 layer unoptimizable",
- weightInfo,
- biasInfo,
- outputInfo);
+ "conv3 layer", outputInfo);
+ Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
+ "conv4 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv5Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv5 layer", weightInfo, biasInfo, outputInfo);
+ "conv5 layer", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -747,25 +732,24 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, L
Layer* const input2Layer = AddInputLayer(graph, "input2 layer", inputInfo, 1);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer3 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer3 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, biasInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
+ "conv1 layer", outputInfo);
Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv2 layer unoptimizable", weightInfo, biasInfo,
- outputInfo);
+ "conv2 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv3 layer", weightInfo, biasInfo, outputInfo);
+ "conv3 layer", outputInfo);
AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
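Note that the call-site edits above also correct a latent mix-up: the ConstantLayers were previously registered with outputInfo as their output TensorInfo, whereas they now advertise weightInfo and biasInfo, which matters once the convolutions consume them through real input slots.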
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index fed21eb911..2a09f6508f 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -158,13 +158,8 @@ TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
FullyConnectedQueueDescriptor invalidData;
WorkloadInfo invalidInfo;
- ScopedTensorHandle weightTensor(weightsDesc);
- ScopedTensorHandle biasTensor(biasesDesc);
-
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
- invalidData.m_Weight = &weightTensor;
- invalidData.m_Bias = &biasTensor;
invalidData.m_Parameters.m_BiasEnabled = true;
invalidData.m_Parameters.m_TransposeWeightMatrix = false;
@@ -678,16 +673,10 @@ TEST_CASE("BiasPerAxisQuantization_ValidateCorrectValues")
AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedTensorHandle weightTensor(weightInfo);
- queueDescriptor.m_Weight = &weightTensor;
-
// Test 1: correct per-axis quantization values
const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);
- ScopedTensorHandle biasHandle1(biasInfo1);
- queueDescriptor.m_Bias = &biasHandle1;
-
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo1, nullptr);
CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
@@ -729,16 +718,10 @@ TEST_CASE("BiasPerAxisQuantization_ValidateIncorrectValues")
AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedTensorHandle weightTensor(weightInfo);
- queueDescriptor.m_Weight = &weightTensor;
-
- // Test 2: wrong per-axis quantization values
+ // Test 2: wrong per-axis quantization values
const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);
- ScopedTensorHandle biasHandle2(biasInfo2);
- queueDescriptor.m_Bias = &biasHandle2;
-
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo2, nullptr);
CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
@@ -781,16 +764,10 @@ TEST_CASE("BiasPerAxisQuantization_ValidateInvalidArgumentException")
AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedTensorHandle weightTensor(weightInfo);
- queueDescriptor.m_Weight = &weightTensor;
-
// Test 3: mismatched number of quantization scales
const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);
- ScopedTensorHandle biasHandle3(biasInfo3);
- queueDescriptor.m_Bias = &biasHandle3;
-
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo3, nullptr);
CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
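Validation now sees the bias purely as the next workload input, so each per-axis test only has to register a differently quantized biasInfo before calling Validate(). A condensed sketch of the pattern shared by the three tests above:

    // Swap in a bias TensorInfo as the next input, then validate; Test 3's
    // mismatched scale count makes Validate() throw InvalidArgumentException.
    const TensorInfo biasInfo(biasShape, biasType, biasPerAxisScales, 0);
    AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, nullptr);
    queueDescriptor.Validate(workloadInfo);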
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 1e0adc169a..8a2d8c8696 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -313,8 +313,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedTensorHandle biasTensor(biasDesc);
// Permute the kernel if necessary
std::vector<T> kernel = originalKernel;
@@ -322,12 +320,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
{
armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
}
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
- if(biasEnabled)
- {
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- }
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
@@ -339,8 +331,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -442,15 +432,13 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
+// armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+// AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
- armnn::ScopedTensorHandle biasTensor(biasDesc);
+// armnn::ScopedTensorHandle biasTensor(biasDesc);
armnn::Convolution2dQueueDescriptor data;
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -590,18 +578,16 @@ LayerTestResult<T,4> Convolution1dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelInfo);
- armnn::ScopedTensorHandle biasTensor(biasInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
+// armnn::ScopedTensorHandle weightsTensor(kernelInfo);
+// armnn::ScopedTensorHandle biasTensor(biasInfo);
+//
+// AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
+// AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
AddInputToWorkload(data, info, inputInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelInfo, weightsHandle.get());
AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
data.m_Parameters.m_StrideX = 1;
data.m_Parameters.m_StrideY = stride;
data.m_Parameters.m_PadLeft = 0;
@@ -1421,25 +1407,14 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedTensorHandle biasTensor(biasDesc);
-
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data());
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padX;
@@ -1827,15 +1802,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
-
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // Needed in Neon and Cl Workload when permuting. Backend TensorHandle in (2) below will not work.
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- // Cannot PolymorphicDowncast from ScopedTensorHandle->RefTensorHandle.
- // Need to PolymorphicDowncast from ITensorHandle->RefTensorHandle.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -1852,8 +1818,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -2009,12 +1973,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2031,8 +1989,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
data.m_Parameters.m_StrideX = 1;
data.m_Parameters.m_StrideY = 1;
data.m_Parameters.m_PadLeft = 0;
@@ -2234,12 +2190,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2256,8 +2206,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
data.m_Parameters.m_StrideX = 2;
data.m_Parameters.m_StrideY = 1;
data.m_Parameters.m_PadLeft = 0;
@@ -2426,12 +2374,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2448,8 +2390,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -3177,25 +3117,14 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(aclKernelDescriptor);
- armnn::ScopedTensorHandle biasTensor(biasDesc);
-
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, aclKernelDescriptor, weightsHandle.get());
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), aclKernelData.data());
- AllocateAndCopyDataToITensorHandle(&weightsTensor, aclKernelData.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padX;
@@ -3696,16 +3625,14 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
WorkloadInfo workloadInfo;
- ScopedTensorHandle weightTensor(kernelInfo);
- ScopedTensorHandle biasTensor(biasInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
+// ScopedTensorHandle weightTensor(kernelInfo);
+// ScopedTensorHandle biasTensor(biasInfo);
+//
+// AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
+// AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
Convolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
- queueDescriptor.m_Weight = &weightTensor;
- queueDescriptor.m_Bias = &biasTensor;
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
@@ -3995,26 +3922,16 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
DepthwiseConvolution2dQueueDescriptor queueDescriptor;
WorkloadInfo workloadInfo;
- ScopedTensorHandle weightTensor(kernelInfo);
- ScopedTensorHandle biasTensor(biasInfo);
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, biasHandle.get());
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data());
- AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
queueDescriptor.m_Parameters = descriptor;
- queueDescriptor.m_Weight = &weightTensor;
- queueDescriptor.m_Bias = &biasTensor;
std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
queueDescriptor,
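The comments deleted throughout this file explained why constant data used to be copied twice: once into a ScopedTensorHandle for the QueueDescriptor, and once into the backend ITensorHandle. With the descriptor members gone, a single copy into the registered input handle suffices; a hedged sketch of the surviving pattern:

    // One copy per constant input is now enough: straight into the backend
    // ITensorHandle that was registered with AddInputToWorkload().
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data());
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasData.data());
    }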
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 5555772c5a..f7519a73bc 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,20 +44,11 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::FullyConnectedQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
- armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
-
data.m_Parameters.m_BiasEnabled = biasEnabled;
data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
data.m_Parameters.m_ConstantWeights = constantWeights;