author     Mike Kelly <mike.kelly@arm.com>    2022-11-25 13:55:24 +0000
committer  mike.kelly <mike.kelly@arm.com>    2022-12-12 15:58:21 +0000
commit     ec67a0f08e0f96a5aebf3cac65331c67f6649f5e (patch)
tree       94146a1f43c74d89d83fd5da54688ae0fc19cf85
parent     5383767a7a759c867235ab66bd71f88281e3bd06 (diff)
download   armnn-ec67a0f08e0f96a5aebf3cac65331c67f6649f5e.tar.gz
IVGCVSW-7209 Remove deprecated code due to be removed in 23.02
* Removed weights and bias from Convolution, DepthwiseConv & FullyConnected layers
* Removed the weight and bias ConstTensorHandles from the QueueDescriptors
* Updated Workloads to take tensors from WorkloadInfo rather than the QueueDescriptors
* Removed unused RedirectMembersToConstantInputs optimization and tests.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I9ffcdc4a1c0dff725539dd69fc435b700bd98a56
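With the weighted overloads removed, callers supply weights and biases as ConstantLayers wired into input slots 1 and 2 of the layer. The following is a minimal migration sketch, not part of this commit; it assumes the retained AddConvolution2dLayer/AddConstantLayer API and uses placeholder shapes and values:

#include <armnn/INetwork.hpp>

#include <vector>

void BuildConv2dWithConstantLayers()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input = network->AddInputLayer(0, "input");

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = true;

    // The retained overload takes no weights or biases.
    IConnectableLayer* conv = network->AddConvolution2dLayer(desc, "conv");

    // Weights and bias are ConstantLayers now, not m_Weight/m_Bias members.
    // The TensorInfos must be flagged constant (last constructor argument).
    TensorInfo weightsInfo({ 1, 3, 3, 3 }, DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 1.0f);
    IConnectableLayer* weights = network->AddConstantLayer(ConstTensor(weightsInfo, weightsData), "weights");
    weights->GetOutputSlot(0).SetTensorInfo(weightsInfo);

    TensorInfo biasInfo({ 1 }, DataType::Float32, 0.0f, 0, true);
    std::vector<float> biasData(1, 0.0f);
    IConnectableLayer* bias = network->AddConstantLayer(ConstTensor(biasInfo, biasData), "bias");
    bias->GetOutputSlot(0).SetTensorInfo(biasInfo);

    // Slot 0 = activations, slot 1 = weights, slot 2 = bias.
    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    bias->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
}

The optimizer and workloads then pick the tensors up from those input slots, as the rest of this change shows.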
-rw-r--r--  Android.mk | 1
-rw-r--r--  CMakeLists.txt | 1
-rw-r--r--  include/armnn/backends/WorkloadData.hpp | 29
-rw-r--r--  src/armnn/LoadedNetwork.cpp | 2
-rw-r--r--  src/armnn/Network.cpp | 139
-rw-r--r--  src/armnn/Network.hpp | 43
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp | 29
-rw-r--r--  src/armnn/layers/Convolution2dLayer.hpp | 17
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 28
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.hpp | 18
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp | 25
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.hpp | 12
-rw-r--r--  src/armnn/optimizations/All.hpp | 1
-rw-r--r--  src/armnn/optimizations/FoldPadIntoLayer2d.hpp | 19
-rw-r--r--  src/armnn/optimizations/FuseBatchNorm.hpp | 8
-rw-r--r--  src/armnn/optimizations/RedirectMembersToConstantInputs.hpp | 90
-rw-r--r--  src/armnn/test/OptimizerTests.cpp | 35
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp | 21
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp | 17
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp | 15
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp | 27
-rw-r--r--  src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp | 85
-rw-r--r--  src/armnnTestUtils/CommonTestUtils.hpp | 11
-rw-r--r--  src/armnnTestUtils/CreateWorkload.hpp | 39
-rw-r--r--  src/backends/aclCommon/ArmComputeSubgraphUtils.hpp | 8
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 36
-rw-r--r--  src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp | 126
-rw-r--r--  src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp | 124
-rw-r--r--  src/backends/backendsCommon/test/WorkloadDataValidation.cpp | 27
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 111
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp | 11
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp | 9
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 14
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.hpp | 4
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClFullyConnectedWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 34
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.hpp | 10
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp | 33
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp | 11
-rw-r--r--  src/backends/neon/workloads/NeonWorkloadUtils.hpp | 38
43 files changed, 374 insertions, 956 deletions
diff --git a/Android.mk b/Android.mk
index e9f70c9417..0e4e6f90d4 100644
--- a/Android.mk
+++ b/Android.mk
@@ -450,7 +450,6 @@ LOCAL_SRC_FILES := \
src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp \
src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp \
src/armnn/test/optimizations/PermuteAsReshapeTests.cpp \
- src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp \
src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp \
src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp \
src/armnn/test/optimizations/TransposeAsReshapeTests.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 76fb958f07..fc4207b258 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -591,7 +591,6 @@ if(BUILD_UNIT_TESTS)
src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
- src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index 4fbb6d423a..7a0a765f8e 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -179,15 +179,6 @@ struct FillQueueDescriptor : QueueDescriptorWithParameters<FillDescriptor>
// Fully connected layer workload data.
struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
{
- FullyConnectedQueueDescriptor()
- : m_Weight(nullptr)
- , m_Bias(nullptr)
- {
- }
-
- const ConstTensorHandle* m_Weight;
- const ConstTensorHandle* m_Bias;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -213,15 +204,6 @@ struct Pooling3dQueueDescriptor : QueueDescriptorWithParameters<Pooling3dDescrip
// Convolution 2D layer workload data.
struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2dDescriptor>
{
- Convolution2dQueueDescriptor()
- : m_Weight(nullptr)
- , m_Bias(nullptr)
- {
- }
-
- const ConstTensorHandle* m_Weight;
- const ConstTensorHandle* m_Bias;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -246,15 +228,6 @@ struct Convolution3dQueueDescriptor : QueueDescriptorWithParameters<Convolution3
///
struct DepthwiseConvolution2dQueueDescriptor : QueueDescriptorWithParameters<DepthwiseConvolution2dDescriptor>
{
- DepthwiseConvolution2dQueueDescriptor()
- : m_Weight(nullptr)
- , m_Bias(nullptr)
- {
- }
-
- const ConstTensorHandle* m_Weight;
- const ConstTensorHandle* m_Bias;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
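On the workload side, the tensors that used to hang off m_Weight/m_Bias are now described by the WorkloadInfo inputs. A rough sketch of the access pattern, simplified rather than lifted verbatim from the backends, with the slot layout assumed as above:

#include <armnn/backends/WorkloadInfo.hpp>

// Inputs arrive in slot order: 0 = activations, 1 = weights, 2 = optional bias.
void CheckConv2dWorkloadInputs(const armnn::WorkloadInfo& info, bool biasEnabled)
{
    const armnn::TensorInfo& inputInfo   = info.m_InputTensorInfos[0];
    const armnn::TensorInfo& weightsInfo = info.m_InputTensorInfos[1];

    if (biasEnabled)
    {
        const armnn::TensorInfo& biasInfo = info.m_InputTensorInfos[2];
        // e.g. check the bias shape and data type against inputInfo/weightsInfo.
    }

    // Remaining shape/type validation continues from inputInfo and weightsInfo.
}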
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 40fbde8ac8..7b24fd77b8 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -421,7 +421,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
ConstWorkloads.push_back(m_WorkloadQueue.back().get());
}
}
- // release the constant data in the layer..
+ // release the constant data in the layer.
layer->ReleaseConstantData();
break;
}
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index a61624fb0a..158142f48e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1714,9 +1714,6 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ConvertConstants");
Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
-
- // Once the constants are converted we can now safely call RedirectMembersToConstantInputs
- Optimizer::Pass(optGraph, MakeOptimizations(RedirectMembersToConstantInputs()));
}
// This must occur after all topological changes to the graph and any redirection of variables
@@ -1860,82 +1857,6 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescr
return m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const Optional<ConstTensor>& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
-{
- ConstantLayer* weightsLayer = nullptr;
- ConstantLayer* biasLayer = nullptr;
- unsigned int numInputs = fullyConnectedDescriptor.GetNumInputs();
-
- // Add a constant layer for weights
- if (weights.has_value())
- {
- weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights.value());
-
- TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
- weightsInfo.SetConstant();
-
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
- }
- else if (fullyConnectedDescriptor.m_ConstantWeights)
- {
- throw InvalidArgumentException("AddFullyConnectedLayer: Constant weights tensor is empty.");
- }
-
- // Add a constant layer for biases
- if (biases.has_value() && fullyConnectedDescriptor.m_BiasEnabled)
- {
- biasLayer = m_Graph->AddLayer<ConstantLayer>("Biases");
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biases.value());
-
- TensorInfo biasInfo = biasLayer->m_LayerOutput->GetTensorInfo();
- biasInfo.SetConstant();
-
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
- }
-
- if (numInputs < 2)
- {
- throw InvalidArgumentException("AddFullyConnectedLayer: Requires at least 2 input tensors: Input, Weights");
- }
-
- auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
-
- if (weightsLayer)
- {
- // Connect weights layer
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
- }
-
- if ( fullyConnectedDescriptor.m_BiasEnabled && numInputs == 3 )
- {
- if (biasLayer)
- {
- // Connect bias layer
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- }
- }
- else if ( !fullyConnectedDescriptor.m_BiasEnabled && numInputs == 2 )
- {
- // Bias is disabled
- layer->m_Bias = nullptr;
- }
- else
- {
- throw InvalidArgumentException(fmt::format(
- "AddFullyConnectedLayer: Value mismatch. When bias is enabled in the "
- "descriptor the number of inputs is expected to be 3 otherwise 2. "
- "BiasEnabled={}, numInputs={}",
- fullyConnectedDescriptor.m_BiasEnabled,
- numInputs));
- }
-
- return layer;
-}
-
IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name)
{
@@ -1948,32 +1869,6 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescrip
return m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
-{
- auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
- // Add a constant layer for weights
- ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- auto weightsTensorHandle = std::make_shared<ScopedTensorHandle>(weights);
- weightsLayer->m_LayerOutput = weightsTensorHandle;
- layer->m_Weight = weightsTensorHandle;
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
- // Add a constant layer for biases
- if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled)
- {
- ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias");
- auto biasTensorHandle = std::make_shared<ScopedTensorHandle>(biases.value());
- biasLayer->m_LayerOutput = biasTensorHandle;
- layer->m_Bias = biasTensorHandle;
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- }
- return layer;
-}
-
IConnectableLayer* NetworkImpl::AddConvertFp16ToFp32Layer(const char* name)
{
return m_Graph->AddLayer<ConvertFp16ToFp32Layer>(name);
@@ -2003,38 +1898,6 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
return m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
-{
- auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
-
- // Add a constant layer for weights
- ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- auto weightsTensorHandle = std::make_shared<ScopedTensorHandle>(weights);
- weightsLayer->m_LayerOutput = weightsTensorHandle;
- layer->m_Weight = weightsTensorHandle;
-
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
-
- // Add a constant layer for biases
- if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled)
- {
- ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias");
- auto biasTensorHandle = std::make_shared<ScopedTensorHandle>(biases.value());
- biasLayer->m_LayerOutput = biasTensorHandle;
- layer->m_Bias = biasTensorHandle;
-
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- }
-
- return layer;
-}
-
IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
const ConstTensor& anchors, const char* name)
{
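The exception text in the deleted overload captures a contract that still holds when wiring the graph by hand: three inputs when bias is enabled, two otherwise. A sketch of the bias-disabled case against the retained API (illustrative shapes, not part of this commit):

#include <armnn/INetwork.hpp>

#include <vector>

void BuildFullyConnectedNoBias()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input = network->AddInputLayer(0, "input");

    // m_BiasEnabled = false => exactly two inputs: activations and weights.
    FullyConnectedDescriptor fcDesc;
    fcDesc.m_BiasEnabled = false;
    IConnectableLayer* fc = network->AddFullyConnectedLayer(fcDesc, "fc");

    TensorInfo weightsInfo({ 7, 20 }, DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 1.0f);
    IConnectableLayer* weights = network->AddConstantLayer(ConstTensor(weightsInfo, weightsData), "weights");
    weights->GetOutputSlot(0).SetTensorInfo(weightsInfo);

    input->GetOutputSlot(0).Connect(fc->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
}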
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 5ca16e2968..a37a4be218 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -76,23 +76,6 @@ public:
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "23.02")
- IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "23.02")
- IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "23.02")
- IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr);
-
IConnectableLayer* AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
const char* name = nullptr);
@@ -101,23 +84,14 @@ public:
IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
const char* name = nullptr);
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr);
+ IConnectableLayer* AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const char* name = nullptr);
IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
- IConnectableLayer* AddDetectionPostProcessLayer(
- const DetectionPostProcessDescriptor& descriptor,
- const ConstTensor& anchors,
- const char* name = nullptr);
+ IConnectableLayer* AddDetectionPostProcessLayer(const DetectionPostProcessDescriptor& descriptor,
+ const ConstTensor& anchors,
+ const char* name = nullptr);
IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
@@ -134,11 +108,6 @@ public:
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const char* name = nullptr);
- IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const Optional<ConstTensor>& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr);
-
IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name = nullptr);
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index d0233976c4..e06b45acb0 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,18 +48,8 @@ void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn
std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- // on this level constant data should not be released..
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Convolution2dLayer_CreateWorkload");
Convolution2dQueueDescriptor descriptor;
- if (m_Weight)
- {
- descriptor.m_Weight = m_Weight.get();
- }
- if (m_Param.m_BiasEnabled && m_Bias)
- {
- descriptor.m_Bias = m_Bias.get();
- }
-
SetAdditionalInfo(descriptor);
return factory.CreateWorkload(LayerType::Convolution2d, descriptor, PrepInfoAndDesc(descriptor));
@@ -68,14 +58,6 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFac
Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName());
-
- layer->m_Weight = m_Weight ? m_Weight : nullptr;
-
- if (layer->m_Param.m_BiasEnabled)
- {
- layer->m_Bias = m_Bias ? m_Bias : nullptr;
- }
-
return std::move(layer);
}
@@ -140,14 +122,7 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs()
Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
{
Layer::ConstantTensors tensors = GetConnectedConstantAsInputTensors();
-
- if (!tensors.empty())
- {
- return tensors;
- }
-
- // For API stability DO NOT ALTER order and add new members to the end of vector
- return {m_Weight, m_Bias};
+ return tensors;
}
void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 02ae05f83b..f7e4dec72f 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -16,13 +16,6 @@ class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
{
public:
- /// A unique pointer to store Weight values.
- /// @Note: Deprecated. Removal date is 23.02. Weights are stored in ConstantLayers now.
- std::shared_ptr<ConstTensorHandle> m_Weight;
- /// A unique pointer to store Bias values.
- /// @Note: Deprecated. Removal date is 23.02. Bias are stored in ConstantLayers now.
- std::shared_ptr<ConstTensorHandle> m_Bias;
-
/// Makes a workload for the Convolution2d type.
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
@@ -48,6 +41,10 @@ public:
void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
+ /// This layer does not have any data stored, weights and bias are now stored in constant layers.
+ /// We do not want to release the data in the constant layer, that is why we override with an empty function.
+ void ReleaseConstantData() override {}
+
protected:
/// Constructor to create a Convolution2dLayer.
/// @param [in] param Convolution2dDescriptor to configure the convolution2d operation.
@@ -57,8 +54,8 @@ protected:
/// Default destructor
~Convolution2dLayer() = default;
- /// @Note Deprecated. GetConstantTensorsByRef is deprecated. m_Weights and m_Bias
- /// should be connected to layer as Constant Layers instead."
+ /// Retrieve the handles to the constant values connected to the layer.
+ /// @return A vector of the constant tensors connected to the layer.
ConstantTensors GetConstantTensorsByRef() override;
};
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index dcd800e367..4c97437a1c 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,16 +50,6 @@ void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFun
std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
DepthwiseConvolution2dQueueDescriptor descriptor;
-
- if (m_Weight)
- {
- descriptor.m_Weight = m_Weight.get();
- }
- if (m_Param.m_BiasEnabled && m_Bias)
- {
- descriptor.m_Bias = m_Bias.get();
- }
-
SetAdditionalInfo(descriptor);
return factory.CreateWorkload(LayerType::DepthwiseConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
@@ -68,13 +58,6 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWo
DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<DepthwiseConvolution2dLayer>(graph, m_Param, GetName());
- layer->m_Weight = m_Weight ? m_Weight : nullptr;
-
- if (layer->m_Param.m_BiasEnabled)
- {
- layer->m_Bias = m_Bias ? m_Bias : nullptr;
- }
-
return std::move(layer);
}
@@ -143,14 +126,7 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
{
Layer::ConstantTensors tensors = GetConnectedConstantAsInputTensors();
-
- if (!tensors.empty())
- {
- return tensors;
- }
-
- // For API stability DO NOT ALTER order and add new members to the end of vector
- return {m_Weight, m_Bias};
+ return tensors;
}
void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index baae7f122a..ef7410f1d3 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -15,12 +15,6 @@ class ScopedTensorHandle;
class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolution2dDescriptor>
{
public:
- /// A unique pointer to store Weight values.
- /// @Note Deprecated. Removal date is 23.02. Weights are stored in ConstantLayers now.
- std::shared_ptr<ConstTensorHandle> m_Weight;
- /// A unique pointer to store Bias values.
- /// @Note Deprecated. Removal date is 23.02. Bias are stored in ConstantLayers now.
- std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the DepthwiseConvolution2d type.
/// @param [in] graph The graph where this layer can be found.
@@ -47,6 +41,10 @@ public:
void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
+ /// This layer does not have any data stored, weights and bias are now stored in constant layers.
+ /// We do not want to release the data in the constant layer, that is why we override with an empty function.
+ void ReleaseConstantData() override {}
+
protected:
/// Constructor to create a DepthwiseConvolution2dLayer.
/// @param [in] param DepthwiseConvolution2dDescriptor to configure the depthwise convolution2d.
@@ -56,10 +54,8 @@ protected:
/// Default destructor
~DepthwiseConvolution2dLayer() = default;
- /// Retrieve the handles to the constant values stored by the layer.
- /// @return A vector of the constant tensors stored by this layer.
- /// @Note Deprecated. GetConstantTensorsByRef is deprecated. m_Weights and m_Bias
- /// should be connected to layer as Constant Layers instead."
+ /// Retrieve the handles to the constant values connected to the layer.
+ /// @return A vector of the constant tensors connected to the layer.
ConstantTensors GetConstantTensorsByRef() override;
};
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index c20bc8d167..05c53961e3 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FullyConnectedLayer.hpp"
@@ -22,27 +22,13 @@ FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param,
std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
FullyConnectedQueueDescriptor descriptor;
- if (m_Weight)
- {
- descriptor.m_Weight = m_Weight.get();
- }
- if (m_Param.m_BiasEnabled && m_Bias)
- {
- descriptor.m_Bias = m_Bias.get();
- }
SetAdditionalInfo(descriptor);
-
return factory.CreateWorkload(LayerType::FullyConnected, descriptor, PrepInfoAndDesc(descriptor));
}
FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<FullyConnectedLayer>(graph, m_Param, GetName());
- layer->m_Weight = m_Weight ? m_Weight : nullptr;
- if (layer->m_Param.m_BiasEnabled)
- {
- layer->m_Bias = m_Bias ? m_Bias : nullptr;
- }
return std::move(layer);
}
@@ -78,14 +64,7 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs()
Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
{
Layer::ConstantTensors tensors = GetConnectedConstantAsInputTensors();
-
- if (!tensors.empty())
- {
- return tensors;
- }
-
- // For API stability DO NOT ALTER order and add new members to the end of vector
- return {m_Weight, m_Bias};
+ return tensors;
}
void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index 07f4a936f9..f3ca696b62 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -15,12 +15,6 @@ class ScopedTensorHandle;
class FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor>
{
public:
- /// A unique pointer to store Weight values.
- /// @Note: Deprecated. Removal date is 23.02. Weights are stored in ConstantLayers now.
- std::shared_ptr<ConstTensorHandle> m_Weight;
- /// A unique pointer to store Bias values.
- /// @Note: Deprecated. Removal date is 23.02. Bias are stored in ConstantLayers now.
- std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the FullyConnected type.
/// @param [in] graph The graph where this layer can be found.
@@ -45,6 +39,10 @@ public:
void ExecuteStrategy(IStrategy& strategy) const override;
+ /// This layer does not have any data stored, weights and bias are now stored in constant layers.
+ /// We do not want to release the data in the constant layer, that is why we override with an empty function.
+ void ReleaseConstantData() override {}
+
protected:
/// Constructor to create a FullyConnectedLayer.
/// @param [in] param FullyConnectedDescriptor to configure the fully connected operation.
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index a11dec9446..0e67516193 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -20,6 +20,5 @@
#include "PermuteAsReshape.hpp"
#include "PermuteAndBatchToSpaceAsDepthToSpace.hpp"
#include "PermuteDepthwiseConv2dWeights.hpp"
-#include "RedirectMembersToConstantInputs.hpp"
#include "SquashEqualSiblings.hpp"
#include "TransposeAsReshape.hpp" \ No newline at end of file
diff --git a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp
index 7a50c4ac06..874749fda9 100644
--- a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp
+++ b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -196,21 +196,14 @@ public:
if (newConv2dLayer != nullptr)
{
const auto conv2dLayer = PolymorphicDowncast<Convolution2dLayer*>(&connection.GetOwningLayer());
- // Copy weights and bias to the new convolution layer
ARMNN_ASSERT_MSG(newConv2dLayer->GetInputSlot(1).GetConnection() != nullptr,
"FoldPadIntoConvolution2d: New convolution layer is missing connection to weights layer");
- // Deprecated. Removal date is 23.02.
- newConv2dLayer->m_Weight = std::move(conv2dLayer->m_Weight);
-
if (conv2dLayer->GetParameters().m_BiasEnabled)
{
ARMNN_ASSERT_MSG(newConv2dLayer->GetInputSlot(2).GetConnection() != nullptr,
"FoldPadIntoConvolution2d: New convolution layer is missing "
"connection to bias layer.");
-
- // Deprecated. Removal date is 23.02.
- newConv2dLayer->m_Bias = std::move(conv2dLayer->m_Bias);
}
}
}
@@ -230,24 +223,18 @@ public:
if (newConv2dLayer != nullptr)
{
const auto conv2dLayer = PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&connection.GetOwningLayer());
- // Copy weights and bias to the new convolution layer
ARMNN_ASSERT_MSG(newConv2dLayer->GetInputSlot(1).GetConnection() != nullptr,
- "FoldPadIntoDepthwiseConvolution2d: New convolution layer is missing connection to weights layer");
-
- // Deprecated. Removal date is 23.02.
- newConv2dLayer->m_Weight = std::move(conv2dLayer->m_Weight);
+ "FoldPadIntoDepthwiseConvolution2d: New convolution layer is missing "
+ "connection to weights layer");
if (conv2dLayer->GetParameters().m_BiasEnabled)
{
ARMNN_ASSERT_MSG(newConv2dLayer->GetInputSlot(2).GetConnection() != nullptr,
"FoldPadIntoConvolution2d: New convolution layer is missing "
"connection to bias layer.");
- // Deprecated. Removal date is 23.02.
- newConv2dLayer->m_Bias = std::move(conv2dLayer->m_Bias);
}
}
}
-
protected:
FoldPadIntoDepthwiseConvolution2dImpl() = default;
~FoldPadIntoDepthwiseConvolution2dImpl() = default;
diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp
index bca0c7d00a..88ac97cd0c 100644
--- a/src/armnn/optimizations/FuseBatchNorm.hpp
+++ b/src/armnn/optimizations/FuseBatchNorm.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -167,8 +167,6 @@ public:
auto& newConv2dLayer = *graph.InsertNewLayer<ConvLayer>(base.GetInputSlot(0),
convDescriptor,
name.c_str());
- newConv2dLayer.m_Weight = std::make_unique<ScopedTensorHandle>(fusedWeightsTensor);
- newConv2dLayer.m_Bias = std::make_unique<ScopedTensorHandle>(ConstTensor(fusedBiasTensor));
// Connect weights and bias from old to new Conv2d layer
// This optimization will always have 3 input slots on the Conv2d base layer
@@ -177,7 +175,7 @@ public:
// Remove old connection and connect to new layer2d
weightLayer->GetOutputSlot(0).Disconnect(base.GetInputSlot(1));
weightLayer->GetOutputSlot(0).Connect(newConv2dLayer.GetInputSlot(1));
- weightLayer->m_LayerOutput = newConv2dLayer.m_Weight;
+ weightLayer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(fusedWeightsTensor);
// Move bias const layers as normal if it was enabled before the optimisation
ConstantLayer* biasLayer;
@@ -198,7 +196,7 @@ public:
biasLayer->GetOutputSlot(0).SetTensorInfo(fusedBiasTensor.GetInfo());
biasLayer->GetOutputSlot(0).Connect(newConv2dLayer.GetInputSlot(2));
}
- biasLayer->m_LayerOutput = newConv2dLayer.m_Bias;
+ biasLayer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(fusedBiasTensor));
}
diff --git a/src/armnn/optimizations/RedirectMembersToConstantInputs.hpp b/src/armnn/optimizations/RedirectMembersToConstantInputs.hpp
deleted file mode 100644
index a2bad710e6..0000000000
--- a/src/armnn/optimizations/RedirectMembersToConstantInputs.hpp
+++ /dev/null
@@ -1,90 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Optimization.hpp"
-
-#include <armnn/utility/IgnoreUnused.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-namespace armnn
-{
-namespace optimizations
-{
-
-class RedirectMembersToConstantInputsImpl
-{
-public:
- /// Search for layers with ConstantLayers as inputs. If the inputs are constant redirect the layers member
- /// variable for ConstTensors (e.g. m_weights) to the data stored in the ConstantLayer it is connected to.
- void Run(Graph& graph, Layer& layer) const
- {
- IgnoreUnused(graph);
-
- switch (layer.GetType())
- {
- case LayerType::BatchNormalization:
- break;
- case LayerType::Convolution2d:
- RedirectWeightsAndBiases<Convolution2dLayer>(&layer);
- break;
- case LayerType::DepthwiseConvolution2d:
- RedirectWeightsAndBiases<DepthwiseConvolution2dLayer>(&layer);
- break;
- case LayerType::DetectionPostProcess:
- break;
- case LayerType::FullyConnected:
- RedirectWeightsAndBiases<FullyConnectedLayer>(&layer);
- break;
- case LayerType::Lstm:
- break;
- case LayerType::TransposeConvolution2d:
- break;
- default:
- break;
- }
- }
-
-protected:
- RedirectMembersToConstantInputsImpl() = default;
- ~RedirectMembersToConstantInputsImpl() = default;
-
-private:
- template <typename LayerT>
- static LayerT* RedirectWeightsAndBiases(Layer* layer)
- {
- LayerT* layerPtr = PolymorphicDowncast<LayerT*>(layer);
-
- // Loop through input slots to check for constant weights and biases layers.
- // Weights index = 1, Biases index = 2.
- for (unsigned int inputSlotIndex = 1; inputSlotIndex != layerPtr->GetNumInputSlots(); ++inputSlotIndex)
- {
- OutputSlot* outputSlot = layerPtr->GetInputSlot(inputSlotIndex).GetConnectedOutputSlot();
- // Debug layers should not be inserted in optimize process yet
- ARMNN_ASSERT(outputSlot->GetOwningLayer().GetType() != LayerType::Debug);
- if (outputSlot->GetOwningLayer().GetType() == LayerType::Constant)
- {
- // Get constant layer and redirect base layer member variables.
- ConstantLayer& constantLayer = dynamic_cast<ConstantLayer&>(outputSlot->GetOwningLayer());
- if (inputSlotIndex == 1)
- {
- layerPtr->m_Weight = constantLayer.m_LayerOutput;
- }
- else if (inputSlotIndex == 2)
- {
- layerPtr->m_Bias = constantLayer.m_LayerOutput;
- }
- }
- }
-
- return layerPtr;
- }
-};
-
-using RedirectMembersToConstantInputs = OptimizeForType<Layer, RedirectMembersToConstantInputsImpl>;
-
-} // namespace optimizations
-} // namespace armnn
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index b78863dddc..f83900404b 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -441,16 +441,15 @@ void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
Layer* input = graph.AddLayer<InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(inputInfo);
- ConstantLayer* weightsLayer = nullptr;
- weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
+ ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
+
input->GetOutputSlot().Connect(layer->GetInputSlot(0));
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -908,11 +907,10 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
{
std::vector<float> biasVector = { 11 };
ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), biasVector);
- biasLayer =graph.AddLayer<ConstantLayer>("Bias");
+ biasLayer = graph.AddLayer<ConstantLayer>("Bias");
biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(bias);
biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
- conv->m_Bias = biasLayer->m_LayerOutput;
}
// Connect layers
@@ -921,9 +919,6 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- // Temporary workaround to ensure the descriptor weights are populated
- conv->m_Weight = weightsLayer->m_LayerOutput;
-
if (convolution2dDescriptor.m_BiasEnabled)
{
CHECK(6 == graph.GetNumLayers());
@@ -983,22 +978,22 @@ TEST_CASE("OptimizeForExclusiveConnectionsWithoutFuseTest")
batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
- CHECK(5 == graph.GetNumLayers());
+ CHECK((5 == graph.GetNumLayers()));
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::BatchNormalizationLayer>,
- &IsLayerOfType<armnn::OutputLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::Convolution2dLayer>,
+ &IsLayerOfType<armnn::BatchNormalizationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Optimize graph
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));
CHECK(5 == graph.GetNumLayers());
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::BatchNormalizationLayer>,
- &IsLayerOfType<armnn::OutputLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::Convolution2dLayer>,
+ &IsLayerOfType<armnn::BatchNormalizationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
}
} // Optimizer TestSuite
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 34e5f6d3b6..118907e703 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -1,12 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <TestUtils.hpp>
-#include <Optimizer.hpp>
#include <Half.hpp>
+#include <Optimizer.hpp>
#include <doctest/doctest.h>
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsFloatToHalfTest")
// Create const tensor from fp32 data
unsigned int dims[] = { 4, 1, 1, 1 };
std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
+ armnn::TensorInfo weightsInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::ConstTensor weights(weightsInfo, floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
+ auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+ weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);
+
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
// Connect up the layers
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether data matches expected fp16 data
- const Half* data = fc->m_Weight->GetConstTensor<Half>();
+ const Half* data = weightsLayer->m_LayerOutput->GetConstTensor<Half>();
CHECK(data[0] == Half(1.0f));
CHECK(data[1] == Half(2.0f));
CHECK(data[2] == Half(3.0f));
@@ -100,12 +105,14 @@ TEST_CASE("ConvertConstantsFloatToHalfTest_constant")
fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
+ CHECK(5 == graph.GetNumLayers());
CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
+ CHECK(5 == graph.GetNumLayers());
CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether weights data matches expected fp16 data
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 4c453cc799..778d7b0814 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsHalfToFloatTest")
std::vector<uint16_t> halfWeights(4);
armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
halfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true), halfWeights);
+ armnn::TensorInfo weightInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true);
+ armnn::ConstTensor weights(weightInfo, halfWeights);
//Create the simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
+ auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+ weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
+
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
//Connect up the layers
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
//Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
//Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
- const float* data = fc->m_Weight->GetConstTensor<float>();
+ const float* data = weightsLayer->m_LayerOutput->GetConstTensor<float>();
CHECK(1.0f == data[0]);
CHECK(2.0f == data[1]);
CHECK(3.0f == data[2]);
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index bc8839948b..0a4a4fafde 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,14 +33,21 @@ TEST_CASE("Fp32NetworkToFp16OptimizationTest")
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ CHECK(floor->GetDataType() == armnn::DataType::Float16);
+ CHECK(floor->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(floor->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
}
}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 54cbbce89f..5cbd17fb6a 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,13 +27,8 @@ public:
static IConnectableLayer *AddConvolution(INetwork *network,
const Convolution2dDescriptor &descriptor,
- const ConstTensor &weights,
- const Optional<ConstTensor> &biases,
const char *name)
{
- IgnoreUnused(weights);
- IgnoreUnused(biases);
-
return network->AddConvolution2dLayer(descriptor, name);
}
@@ -65,12 +60,8 @@ public:
static IConnectableLayer* AddConvolution(INetwork* network,
const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name)
{
- IgnoreUnused(weights);
- IgnoreUnused(biases);
return network->AddDepthwiseConvolution2dLayer(descriptor, name);
}
@@ -171,8 +162,6 @@ INetworkPtr CreateNetwork(bool depthwise, bool preventFusing)
IConnectableLayer* convLayer = Conv2dTest::AddConvolution(network.get(),
convolution2dDescriptor,
- weights,
- Optional<ConstTensor>(),
"convolution");
IConnectableLayer* batchNormLayer = network->AddBatchNormalizationLayer(batchNormDescriptor,
@@ -243,13 +232,21 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
return IsLayerOfType<ConvLayerType>(layer) &&
(layer->GetNameStr() == "fused-batchNorm-into-convolution");
};
-
+ auto checkConstant = [ ](const armnn::Layer* const layer) -> bool
+ {
+ const ConstantLayer* constLayer = PolymorphicDowncast<const ConstantLayer*>(layer);
+ auto tensor = ConstTensor(constLayer->m_LayerOutput->GetTensorInfo(),
+ constLayer->m_LayerOutput->Map(true));
+ const auto* buffer = static_cast<const T*>(tensor.GetMemoryArea());
+ std::vector<T> vector(buffer, buffer + tensor.GetNumElements());
+ return IsLayerOfType<ConstantLayer>(layer);
+ };
CHECK(5 == graphFused.GetNumLayers());
CHECK(CheckSequence(graphFused.cbegin(),
graphFused.cend(),
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<ConstantLayer>,
+ checkConstant,
+ checkConstant,
checkFusedConv2d,
&IsLayerOfType<OutputLayer>));
diff --git a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
deleted file mode 100644
index b3f9ed8780..0000000000
--- a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("RedirectMembersToConstantInputsFullyConnectedTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo inputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 4 }, armnn::DataType::Float32, 0.0f, 0, true);
- const armnn::TensorInfo biasesInfo ({ 2 }, armnn::DataType::Float32, 0.0f, 0, true);
-
- // Check if isConstant is enabled for weights and biases tensor info.
- CHECK(weightsInfo.IsConstant());
- CHECK(biasesInfo.IsConstant());
-
- armnn::FullyConnectedDescriptor desc;
- desc.m_BiasEnabled = true;
- desc.m_ConstantWeights = false;
-
- // Create the simple test network with Weights and Biases as inputs to a FullyConnected layer.
- auto input = graph.AddLayer<armnn::InputLayer>(0, "Input");
- auto weights = graph.AddLayer<armnn::ConstantLayer>("Weights");
- auto biases = graph.AddLayer<armnn::ConstantLayer>("Biases");
- auto fcLayer = graph.AddLayer<armnn::FullyConnectedLayer>(desc, "FullyConnected");
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "Output");
-
- float expectedWeightsData[] = { 1.0f, 1.0f, 1.0f, 1.0f };
- float expectedBiasesData[] = { 2.0f, 2.0f };
-
- // Set the m_LayerOutput for the optimizer to point to.
- armnn::ConstTensor weightsTensor(weightsInfo, &expectedWeightsData);
- armnn::ConstTensor biasesTensor(biasesInfo, &expectedBiasesData);
- weights->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weightsTensor);
- biases->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(biasesTensor);
-
- input->GetOutputSlot().SetTensorInfo(inputInfo);
- weights->GetOutputSlot().SetTensorInfo(weightsInfo);
- biases->GetOutputSlot().SetTensorInfo(biasesInfo);
- fcLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
- // Connect up the layers
- input->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(0));
- weights->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(1));
- biases->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(2));
- fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- // Member variables should be null before optimization.
- CHECK(fcLayer->m_Weight == nullptr);
- CHECK(fcLayer->m_Bias == nullptr);
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs()));
-
- // Check if member variables are not null and shape is set correctly.
- CHECK(fcLayer->m_Weight != nullptr);
- CHECK(fcLayer->m_Bias != nullptr);
- CHECK(fcLayer->m_Weight->GetTensorInfo().GetShape() == weightsInfo.GetShape());
- CHECK(fcLayer->m_Bias->GetTensorInfo().GetShape() == biasesInfo.GetShape());
-
- // Check whether data matches expected float data
- const float* weightsData = fcLayer->m_Weight->GetConstTensor<float>();
- CHECK(weightsData[0] == expectedWeightsData[0]);
- CHECK(weightsData[1] == expectedWeightsData[1]);
- CHECK(weightsData[2] == expectedWeightsData[2]);
- CHECK(weightsData[3] == expectedWeightsData[3]);
-
- const float* biasesData = fcLayer->m_Bias->GetConstTensor<float>();
- CHECK(biasesData[0] == expectedBiasesData[0]);
- CHECK(biasesData[1] == expectedBiasesData[1]);
-}
-
-}
\ No newline at end of file
diff --git a/src/armnnTestUtils/CommonTestUtils.hpp b/src/armnnTestUtils/CommonTestUtils.hpp
index 5b4b356247..4262ed5180 100644
--- a/src/armnnTestUtils/CommonTestUtils.hpp
+++ b/src/armnnTestUtils/CommonTestUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -70,15 +70,6 @@ inline bool Compare(T a, T b, float tolerance = 0.000001f)
return std::fabs(static_cast<float>(a) - static_cast<float>(b)) <= tolerance;
}
-template <typename ConvolutionLayer>
-void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo)
-{
- layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
- layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(biasInfo);
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-}
armnn::SubgraphView::InputSlots CreateInputsFrom(armnn::Layer* layer,
std::vector<unsigned int> ignoreSlots = {});
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 905b8fa50b..0846d21388 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -495,10 +495,6 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- // As optimization isn't run member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Weight->Allocate();
-
armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale);
weightsTensorInfo.SetConstant();
@@ -562,12 +558,6 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- // As optimization isn't run member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale);
weightsTensorInfo.SetConstant();
@@ -662,12 +652,6 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a
TensorShape biasShape = TensorShape{ 32 };
TensorShape inputShape = TensorShape{ 1, 32, 149, 149 };
TensorShape outputShape = TensorShape{ 1, 32, 147, 147 };
- // As optimization isn't run member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo(biasShape, GetBiasDataType(DataType)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale);
weightsTensorInfo.SetConstant();
@@ -1132,12 +1116,6 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
armnn::TensorInfo biasTensorInfo(biasShape, GetBiasDataType(DataType), inputsQScale);
biasTensorInfo.SetConstant();
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(biasTensorInfo);
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
// Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
auto const weights = graph.AddLayer<ConstantLayer>("weights");
@@ -1170,8 +1148,6 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
CHECK(queueDescriptor.m_Inputs.size() == 3);
CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == weightsTensorInfo));
- CHECK((queueDescriptor.m_Bias->GetTensorInfo() == biasTensorInfo));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1203,9 +1179,6 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
- // As optimization isn't run member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType)); // [ 1, H, W, I*M ]
- layer->m_Weight->Allocate();
// Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
@@ -1252,10 +1225,6 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- // As optimization isn't run member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Weight->Allocate();
-
armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
weightsTensorInfo.SetConstant();
@@ -1301,12 +1270,6 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- // As optimization isn't run member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale);
weightsTensorInfo.SetConstant();
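
The pattern that survives in these CreateWorkload tests: describe the weights with a TensorInfo, mark it constant, and register it as an ordinary workload input instead of assigning layer->m_Weight. A sketch (variable names hypothetical, mirroring the backend tests later in this diff):

    armnn::TensorInfo weightsTensorInfo(weightShape, DataType, inputsQScale);
    weightsTensorInfo.SetConstant();
    AddInputToWorkload(data, info, weightsTensorInfo, weightsHandle.get());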
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index 766bf2d2cc..c9d6c71f18 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -188,8 +188,6 @@ LayerType* FuseConvolution2dLayer(OptimizationViews& optimizationViews,
LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
- replacementLayer->m_Weight = std::move(baseLayer->m_Weight);
- replacementLayer->m_Bias = std::move(baseLayer->m_Bias);
FuseLayer(optimizationViews,
baseLayer,
@@ -212,8 +210,6 @@ LayerType* FuseDepthwiseConvolution2dLayer(OptimizationViews& optimizationViews,
LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
- replacementLayer->m_Weight = std::move(baseLayer->m_Weight);
- replacementLayer->m_Bias = std::move(baseLayer->m_Bias);
FuseLayer(optimizationViews,
baseLayer,
@@ -242,8 +238,6 @@ LayerType* FuseFullyConnectedLayer(OptimizationViews& optimizationViews,
activationLayer,
activationDesc);
- replacementLayer->m_Weight = std::move(baseLayer->m_Weight);
- replacementLayer->m_Bias = std::move(baseLayer->m_Bias);
return replacementLayer;
}
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 0d98804954..6a673c6e93 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1469,8 +1469,6 @@ void CreateReferenceDynamicBackendTestImpl()
{ outputInfo }
};
convolution2dQueueDescriptor.m_Inputs.push_back(nullptr);
- auto weights = std::make_unique<ScopedTensorHandle>(weightInfo);
- convolution2dQueueDescriptor.m_Weight = weights.get();
// Create a convolution workload with the dummy settings
auto workload = referenceWorkloadFactory->CreateWorkload(LayerType::Convolution2d,
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 18f11a542e..fb7a0271d4 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -242,10 +242,6 @@ struct DummyConvolutionLayer
desc.m_StrideX = 1;
desc.m_StrideY = 1;
m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
~DummyConvolutionLayer()
@@ -268,10 +264,28 @@ struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
{
};
-template<>
+// Note: once m_Weight and m_Bias are removed from TransposeConvolution2dLayer, this specialization can derive from DummyConvolutionLayer again
+template <>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
- : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
+ DummyLayer()
+ {
+ typename armnn::TransposeConvolution2dLayer::DescriptorType desc;
+ desc.m_StrideX = 1;
+ desc.m_StrideY = 1;
+ m_Layer = dummyGraph.AddLayer<armnn::TransposeConvolution2dLayer>(desc, "");
+ m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+ m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+ }
+
+ ~DummyLayer()
+ {
+ dummyGraph.EraseLayer(m_Layer);
+ }
+
+ armnn::TransposeConvolution2dLayer* m_Layer;
};
template<>
@@ -518,8 +532,6 @@ struct DummyLayer<armnn::FullyConnectedLayer>
{
armnn::FullyConnectedLayer::DescriptorType desc;
m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
~DummyLayer()
@@ -767,12 +779,6 @@ unsigned int GetNumOutputs(const armnn::Layer& layer)
return layer.GetNumOutputSlots();
}
-template<>
-unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
-{
- IgnoreUnused(layer);
- return 2;
-}
// Tests that the IsLayerSupported() function returns the correct value.
// We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index abfb621c93..b7b514573c 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -12,8 +12,6 @@
#include <doctest/doctest.h>
-#include <utility>
-
using namespace armnn;
using namespace std;
@@ -84,29 +82,15 @@ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
layerDesc.m_StrideY = 4;
layerDesc.m_BiasEnabled = true;
- Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
- armnn::DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
- ConstantLayer* biasLayer = graph.AddLayer<ConstantLayer>("Bias");
+ auto* const convolutionLayer = graph.AddLayer<Convolution2dLayer>(layerDesc, "convolution");
+ auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
- armnn::DataType::Float32));
+ TensorInfo weightsInfo = TensorInfo({ 2, 3, 5, 3 }, armnn::DataType::Float32, 1.0f, 0, true);
+ TensorInfo biasInfo = TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32), 1.0f, 0, true);
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(
- TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
-
- TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
- weightsInfo.SetConstant();
- TensorInfo biasInfo = biasLayer->m_LayerOutput->GetTensorInfo();
- biasInfo.SetConstant();
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+ biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
@@ -116,21 +100,21 @@ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
- weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
- biasLayer->GetOutputSlot().Connect(layer->GetInputSlot(2));
- Connect(layer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
+ Connect(input, convolutionLayer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
+ weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
+ biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
+ Connect(convolutionLayer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
// check the constants that they are not NULL
CHECK(weightsLayer->m_LayerOutput != nullptr);
CHECK(biasLayer->m_LayerOutput != nullptr);
- // free up the constants..
- layer->ReleaseConstantData();
+ // free up the constants.
+ convolutionLayer->ReleaseConstantData();
- // check the constants that they are NULL now
- CHECK(weightsLayer->m_LayerOutput == nullptr);
- CHECK(biasLayer->m_LayerOutput == nullptr);
+ // check that the constants are still not NULL
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
}
TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
@@ -147,33 +131,39 @@ TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
layerDesc.m_StrideY = 4;
layerDesc.m_BiasEnabled = true;
- DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
+ auto* const convolutionLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "convolution");
+ auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
+
+ TensorInfo weightsInfo = TensorInfo({ 3, 3, 5, 3 }, armnn::DataType::Float32, 1.0f, 0, true);
+ TensorInfo biasInfo = TensorInfo({ 9 }, GetBiasDataType(armnn::DataType::Float32), 1.0f, 0, true);
+
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+ biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(
- TensorInfo({3, 3, 5, 3}, DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(
- TensorInfo({9}, DataType::Float32));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
// create extra layers
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
- Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
+ Connect(input, convolutionLayer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
+ weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
+ biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
+ Connect(convolutionLayer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
// check the constants that they are not NULL
- CHECK(layer->m_Weight != nullptr);
- CHECK(layer->m_Bias != nullptr);
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
- // free up the constants..
- layer->ReleaseConstantData();
+ // free up the constants.
+ convolutionLayer->ReleaseConstantData();
- // check the constants that they are NULL now
- CHECK(layer->m_Weight == nullptr);
- CHECK(layer->m_Bias == nullptr);
+ // check that the constants are still not NULL
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
}
TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
@@ -185,36 +175,42 @@ TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
layerDesc.m_BiasEnabled = true;
layerDesc.m_TransposeWeightMatrix = true;
- FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+ auto* const fullyConnectedLayer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+ auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");
float inputsQScale = 1.0f;
float outputQScale = 2.0f;
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(
- TensorInfo({7, 20}, DataType::QAsymmU8, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(
- TensorInfo({7}, GetBiasDataType(DataType::QAsymmU8), inputsQScale));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
+ TensorInfo weightsInfo = TensorInfo({ 7, 20 }, DataType::QAsymmU8, inputsQScale, 0, true);
+ TensorInfo biasInfo = TensorInfo({ 7 }, GetBiasDataType(DataType::QAsymmU8), inputsQScale, 0, true);
+
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
+ biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);
+
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
// create extra layers
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
- Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
+ Connect(input, fullyConnectedLayer, TensorInfo({ 3, 1, 4, 5 }, DataType::QAsymmU8, inputsQScale));
+ weightsLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(1));
+ biasLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(2));
+ Connect(fullyConnectedLayer, output, TensorInfo({ 3, 7 }, DataType::QAsymmU8, outputQScale));
// check the constants that they are not NULL
- CHECK(layer->m_Weight != nullptr);
- CHECK(layer->m_Bias != nullptr);
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
- // free up the constants..
- layer->ReleaseConstantData();
+ // free up the constants.
+ fullyConnectedLayer->ReleaseConstantData();
- // check the constants that they are NULL now
- CHECK(layer->m_Weight == nullptr);
- CHECK(layer->m_Bias == nullptr);
+ // check that the constants are still not NULL
+ CHECK(weightsLayer->m_LayerOutput != nullptr);
+ CHECK(biasLayer->m_LayerOutput != nullptr);
}
}
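
The expected results flip because ownership moved: the convolution, depthwise and fully connected layers no longer hold weight/bias handles, so ReleaseConstantData() on them cannot free tensors owned by the ConstantLayers. A minimal stand-alone illustration of that ownership model (plain C++, all names hypothetical):

    #include <cassert>
    #include <memory>

    struct TensorHandle {};

    struct ConstantLayer
    {
        std::shared_ptr<TensorHandle> m_LayerOutput; // the constant data lives here now
    };

    struct ConvolutionLayer
    {
        void ReleaseConstantData() {} // holds no weight/bias handles, nothing to free
    };

    int main()
    {
        ConstantLayer weights{ std::make_shared<TensorHandle>() };
        ConvolutionLayer conv;
        conv.ReleaseConstantData();
        assert(weights.m_LayerOutput != nullptr); // still alive, which is what the tests now check
    }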
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 67354696b0..997fe9850d 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -94,13 +94,10 @@ Convolution2dLayer* AddConvolutionLayer(Graph& graph,
LayerNameToLayerMap& layersInGraph,
const Convolution2dDescriptor& convolutionDescriptor,
const std::string& layerName,
- const TensorInfo& weightInfo,
- const TensorInfo& biasInfo,
const TensorInfo& outputInfo)
{
Convolution2dLayer* const convLayer = graph.AddLayer<Convolution2dLayer>(convolutionDescriptor, layerName.c_str());
CHECK(convLayer);
- SetWeightAndBias(convLayer, weightInfo, biasInfo);
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
layersInGraph.insert(std::make_pair(convLayer->GetName(), convLayer));
return convLayer;
@@ -335,11 +332,11 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, Layer
// Construct the graph
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
Convolution2dLayer* const convLayer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv layer", weightInfo, biasInfo, outputInfo);
+ "conv layer", outputInfo);
ConstantLayer* const weightsLayer =
- AddConstantLayer(graph, layersInGraph, "Weights Layer", constWeightsTensor, outputInfo);
- ConstantLayer* const biasLayer = AddConstantLayer(graph, layersInGraph, "Bias Layer", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer", constWeightsTensor, weightInfo);
+ ConstantLayer* const biasLayer = AddConstantLayer(graph, layersInGraph, "Bias Layer", constBiasTensor, biasInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -373,7 +370,6 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, Layer
std::vector<float> biasVector(16);
ConstTensor constBiasTensor(biasInfo, biasVector);
-
Convolution2dDescriptor convolutionDescriptor;
convolutionDescriptor.m_StrideX = 1;
convolutionDescriptor.m_StrideY = 1;
@@ -383,40 +379,40 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, Layer
// Construct the graph
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
+ "conv1 layer", outputInfo);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv2 layer", weightInfo, biasInfo, outputInfo);
+ "conv2 layer", outputInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, biasInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv3 layer", weightInfo, biasInfo, outputInfo);
+ "conv3 layer", outputInfo);
ConstantLayer* const weightsLayer3 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer3 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, biasInfo);
Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv4 layer", weightInfo, biasInfo, outputInfo);
+ "conv4 layer", outputInfo);
ConstantLayer* const weightsLayer4 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 4", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 4", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer4 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 4", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 4", constBiasTensor, biasInfo);
Convolution2dLayer* const conv5Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv5 layer", weightInfo, biasInfo, outputInfo);
+ "conv5 layer", outputInfo);
ConstantLayer* const weightsLayer5 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer5 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, biasInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -504,26 +500,26 @@ SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, Laye
// Construct the graph
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
+ "conv1 layer", outputInfo);
Pooling2dLayer* const pooling1Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor,
"pooling1 layer", outputInfo);
Pooling2dLayer* const pooling2Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor,
"pooling2 layer", outputInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2", constBiasTensor, biasInfo);
Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv2 layer", weightInfo, biasInfo, outputInfo);
+ "conv2 layer", outputInfo);
Pooling2dLayer* const pooling3Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor,
"pooling3 layer", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -581,14 +577,13 @@ SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, Lay
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
ConstantLayer* const weightsLayer =
- AddConstantLayer(graph, layersInGraph, "Weights Layer unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer =
- AddConstantLayer(graph, layersInGraph, "Bias Layer unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer unoptimizable", constBiasTensor, biasInfo);
Convolution2dLayer* const convLayer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv layer unoptimizable", weightInfo, biasInfo,
- outputInfo);
+ "conv layer unoptimizable", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
// Connect the network
@@ -631,46 +626,36 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, L
Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer3 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer3 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer4 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 4 unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 4 unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer4 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 4 unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 4 unoptimizable", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer5 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 5", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer5 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 5", constBiasTensor, biasInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
- Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph,
- layersInGraph,
- convolutionDescriptor,
- "conv2 layer unoptimizable",
- weightInfo,
- biasInfo,
- outputInfo);
+ "conv1 layer", outputInfo);
+ Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
+ "conv2 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv3 layer", weightInfo, biasInfo, outputInfo);
- Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph,
- layersInGraph,
- convolutionDescriptor,
- "conv4 layer unoptimizable",
- weightInfo,
- biasInfo,
- outputInfo);
+ "conv3 layer", outputInfo);
+ Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
+ "conv4 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv5Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv5 layer", weightInfo, biasInfo, outputInfo);
+ "conv5 layer", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
@@ -747,25 +732,24 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, L
Layer* const input2Layer = AddInputLayer(graph, "input2 layer", inputInfo, 1);
ConstantLayer* const weightsLayer1 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 1", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer1 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 1", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer2 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 2 unoptimizable", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer2 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 2 unoptimizable", constBiasTensor, biasInfo);
ConstantLayer* const weightsLayer3 =
- AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Weights Layer 3", constWeightsTensor, weightInfo);
ConstantLayer* const biasLayer3 =
- AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, outputInfo);
+ AddConstantLayer(graph, layersInGraph, "Bias Layer 3", constBiasTensor, biasInfo);
Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv1 layer", weightInfo, biasInfo, outputInfo);
+ "conv1 layer", outputInfo);
Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv2 layer unoptimizable", weightInfo, biasInfo,
- outputInfo);
+ "conv2 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
- "conv3 layer", weightInfo, biasInfo, outputInfo);
+ "conv3 layer", outputInfo);
AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo);
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
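
Besides dropping the weightInfo/biasInfo parameters from AddConvolutionLayer(), these hunks fix a latent mix-up: every weights and bias ConstantLayer previously advertised outputInfo on its output slot. The corrected pairing, as it now appears throughout the file:

    ConstantLayer* const weightsLayer =
        AddConstantLayer(graph, layersInGraph, "Weights Layer", constWeightsTensor, weightInfo);
    ConstantLayer* const biasLayer =
        AddConstantLayer(graph, layersInGraph, "Bias Layer", constBiasTensor, biasInfo);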
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index fed21eb911..2a09f6508f 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -158,13 +158,8 @@ TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
FullyConnectedQueueDescriptor invalidData;
WorkloadInfo invalidInfo;
- ScopedTensorHandle weightTensor(weightsDesc);
- ScopedTensorHandle biasTensor(biasesDesc);
-
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
- invalidData.m_Weight = &weightTensor;
- invalidData.m_Bias = &biasTensor;
invalidData.m_Parameters.m_BiasEnabled = true;
invalidData.m_Parameters.m_TransposeWeightMatrix = false;
@@ -678,16 +673,10 @@ TEST_CASE("BiasPerAxisQuantization_ValidateCorrectValues")
AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedTensorHandle weightTensor(weightInfo);
- queueDescriptor.m_Weight = &weightTensor;
-
// Test 1: correct per-axis quantization values
const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);
- ScopedTensorHandle biasHandle1(biasInfo1);
- queueDescriptor.m_Bias = &biasHandle1;
-
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo1, nullptr);
CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
@@ -729,16 +718,10 @@ TEST_CASE("BiasPerAxisQuantization_ValidateIncorrectValues")
AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedTensorHandle weightTensor(weightInfo);
- queueDescriptor.m_Weight = &weightTensor;
-
- // Test 2: wrong per-axis quantization values
+ // Test 2: wrong per-axis quantization values
const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);
- ScopedTensorHandle biasHandle2(biasInfo2);
- queueDescriptor.m_Bias = &biasHandle2;
-
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo2, nullptr);
CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
@@ -781,16 +764,10 @@ TEST_CASE("BiasPerAxisQuantization_ValidateInvalidArgumentException")
AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedTensorHandle weightTensor(weightInfo);
- queueDescriptor.m_Weight = &weightTensor;
-
// Test 3: mismatched number of quantization scales
const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);
- ScopedTensorHandle biasHandle3(biasInfo3);
- queueDescriptor.m_Bias = &biasHandle3;
-
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo3, nullptr);
CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
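
Validate() now takes the weight and bias TensorInfos from the WorkloadInfo, so these tests register them as ordinary inputs instead of pointing m_Weight/m_Bias at ScopedTensorHandles. A sketch of the per-axis flow using the helpers above (inputInfo comes from earlier in the test, outside these hunks; the nullptr stands in for a tensor handle the validation path never touches):

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo,  nullptr);
    AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
    AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo1,  nullptr); // bias is just another input
    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));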
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 1e0adc169a..8a2d8c8696 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -313,8 +313,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedTensorHandle biasTensor(biasDesc);
// Permute the kernel if necessary
std::vector<T> kernel = originalKernel;
@@ -322,12 +320,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
{
armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
}
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
- if(biasEnabled)
- {
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- }
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
@@ -339,8 +331,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -442,15 +432,13 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
+// armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+// AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
- armnn::ScopedTensorHandle biasTensor(biasDesc);
+// armnn::ScopedTensorHandle biasTensor(biasDesc);
armnn::Convolution2dQueueDescriptor data;
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -590,18 +578,16 @@ LayerTestResult<T,4> Convolution1dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelInfo);
- armnn::ScopedTensorHandle biasTensor(biasInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
+// armnn::ScopedTensorHandle weightsTensor(kernelInfo);
+// armnn::ScopedTensorHandle biasTensor(biasInfo);
+//
+// AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
+// AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
AddInputToWorkload(data, info, inputInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelInfo, weightsHandle.get());
AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
data.m_Parameters.m_StrideX = 1;
data.m_Parameters.m_StrideY = stride;
data.m_Parameters.m_PadLeft = 0;
@@ -1421,25 +1407,14 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedTensorHandle biasTensor(biasDesc);
-
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data());
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padX;
@@ -1827,15 +1802,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
-
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // Needed in Neon and Cl Workload when permuting. Backend TensorHandle in (2) below will not work.
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- // Cannot PolymorphicDowncast from ScopedTensorHandle->RefTensorHandle.
- // Need to PolymorphicDowncast from ITensorHandle->RefTensorHandle.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -1852,8 +1818,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -2009,12 +1973,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2031,8 +1989,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
data.m_Parameters.m_StrideX = 1;
data.m_Parameters.m_StrideY = 1;
data.m_Parameters.m_PadLeft = 0;
@@ -2234,12 +2190,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2256,8 +2206,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
data.m_Parameters.m_StrideX = 2;
data.m_Parameters.m_StrideY = 1;
data.m_Parameters.m_PadLeft = 0;
@@ -2426,12 +2374,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2448,8 +2390,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
}
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
@@ -3177,25 +3117,14 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(aclKernelDescriptor);
- armnn::ScopedTensorHandle biasTensor(biasDesc);
-
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, aclKernelDescriptor, weightsHandle.get());
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), aclKernelData.data());
- AllocateAndCopyDataToITensorHandle(&weightsTensor, aclKernelData.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padX;
@@ -3696,16 +3625,14 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
WorkloadInfo workloadInfo;
- ScopedTensorHandle weightTensor(kernelInfo);
- ScopedTensorHandle biasTensor(biasInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
+// ScopedTensorHandle weightTensor(kernelInfo);
+// ScopedTensorHandle biasTensor(biasInfo);
+//
+// AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
+// AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
Convolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
- queueDescriptor.m_Weight = &weightTensor;
- queueDescriptor.m_Bias = &biasTensor;
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
@@ -3995,26 +3922,16 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
DepthwiseConvolution2dQueueDescriptor queueDescriptor;
WorkloadInfo workloadInfo;
- ScopedTensorHandle weightTensor(kernelInfo);
- ScopedTensorHandle biasTensor(biasInfo);
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, biasHandle.get());
- // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
- // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
- // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
- // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data());
- AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
queueDescriptor.m_Parameters = descriptor;
- queueDescriptor.m_Weight = &weightTensor;
- queueDescriptor.m_Bias = &biasTensor;
std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
queueDescriptor,
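
Across Conv2dTestImpl.cpp the parallel ScopedTensorHandle copies are gone: weights and biases are staged once, through the backend ITensorHandles registered on the workload. The recurring sequence, condensed from the hunks above:

    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data());

    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
    }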
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 5555772c5a..f7519a73bc 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,20 +44,11 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::FullyConnectedQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
- armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor;
-
data.m_Parameters.m_BiasEnabled = biasEnabled;
data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
data.m_Parameters.m_ConstantWeights = constantWeights;
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 3a757f8820..adea733582 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -477,16 +477,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dClCompiledContext
WorkloadInfo workloadInfo;
- ScopedTensorHandle weightTensor(kernelInfo);
- ScopedTensorHandle biasTensor(biasInfo);
-
- AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
- AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
Convolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
- queueDescriptor.m_Weight = &weightTensor;
- queueDescriptor.m_Bias = &biasTensor;
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 6b0a3b8352..1920f2d20b 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -88,16 +88,16 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::ICLTensor& weights = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor* bias = nullptr;
if (m_Data.m_Parameters.m_BiasEnabled)
{
- bias = &static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+ arm_compute::ICLTensor& bias = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+ m_BiasProxy = std::make_unique<ICLTensorProxy>(&bias);
}
// Create Proxy tensor and set the initial tensor handle to it
m_InputProxy = std::make_unique<ICLTensorProxy>(&input);
m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);
-
+ m_WeightsProxy = std::make_unique<ICLTensorProxy>(&weights);
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
@@ -112,8 +112,8 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution2dWorkload_configure");
m_ConvolutionLayer.configure(clCompileContext,
m_InputProxy.get(),
- &weights,
- bias,
+ m_WeightsProxy.get(),
+ m_BiasProxy.get(),
m_OutputProxy.get(),
padStrideInfo,
arm_compute::WeightsInfo(),
@@ -138,11 +138,11 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString(m_ConvolutionMethod));
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
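
Before this change the CL workload passed a raw bias pointer that defaulted to nullptr; the proxies keep that contract, because an unpopulated unique_ptr yields nullptr from get(). A tiny stand-alone check of that property:

    #include <cassert>
    #include <memory>

    struct Proxy {};

    int main()
    {
        std::unique_ptr<Proxy> biasProxy;   // left empty when bias is disabled
        assert(biasProxy.get() == nullptr); // configure(..., biasProxy.get(), ...) still sees nullptr
    }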
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 7293c830ac..2b46bb9c04 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -63,6 +63,8 @@ private:
arm_compute::ConvolutionMethod m_ConvolutionMethod;
std::unique_ptr<ICLTensorProxy> m_InputProxy;
+ std::unique_ptr<ICLTensorProxy> m_WeightsProxy;
+ std::unique_ptr<ICLTensorProxy> m_BiasProxy;
std::unique_ptr<ICLTensorProxy> m_OutputProxy;
};
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 42fe400041..041cb8b0fc 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -96,10 +96,10 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
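
The indexing convention behind all of these backend edits: WorkloadInfo's input infos now mirror the layer's input slots. A sketch (slot 2 must be guarded, since it only exists when bias is enabled):

    const armnn::TensorInfo& inputInfo   = info.m_InputTensorInfos[0];
    const armnn::TensorInfo& weightsInfo = info.m_InputTensorInfos[1]; // weights moved to slot 1
    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        const armnn::TensorInfo& biasInfo = info.m_InputTensorInfos[2]; // bias occupies slot 2
    }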
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 0e1efe0239..22df04fc76 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -65,10 +65,10 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 586b9c9849..879bb747a1 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -90,11 +90,11 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
output.info()->set_data_layout(aclDataLayout);
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
+ BuildArmComputeTensor(*m_KernelTensor, info.m_InputTensorInfos[1], m_Data.m_Parameters.m_DataLayout);
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
+ BuildArmComputeTensor(*m_BiasTensor, info.m_InputTensorInfos[2], m_Data.m_Parameters.m_DataLayout);
}
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -130,11 +130,12 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString(m_ConvolutionMethod));
+
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
@@ -146,22 +147,31 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
m_ConvolutionLayer.reset(convolutionLayer.release());
ARMNN_ASSERT(m_ConvolutionLayer);
-
- InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
+ m_KernelTensorInfo = info.m_InputTensorInfos[1];
if (m_Data.m_Parameters.m_BiasEnabled)
{
- InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
+ m_BiasTensorInfo = info.m_InputTensorInfos[2];
}
-
- m_ConvolutionLayer->prepare();
- FreeTensorIfUnused(m_KernelTensor);
- FreeTensorIfUnused(m_BiasTensor);
}
void NeonConvolution2dWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution2dWorkload_Execute", this->GetGuid());
+ // The constant tensors may not be fully in place until the workload is Executed
+ if (!prepared)
+ {
+ InitializeArmComputeTensorData(*m_KernelTensor, m_KernelTensorInfo, m_Data.m_Inputs[1]);
+
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ InitializeArmComputeTensorData(*m_BiasTensor, m_BiasTensorInfo, m_Data.m_Inputs[2]);
+ }
+ m_ConvolutionLayer->prepare();
+ FreeTensorIfUnused(m_KernelTensor);
+ FreeTensorIfUnused(m_BiasTensor);
+ prepared = true;
+ }
m_ConvolutionLayer->run();
}
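Note: this is the key behavioural change on the Neon side — tensor preparation moves out of the constructor and into the first Execute(), because a constant input's data may not be in place until the graph actually runs. A self-contained sketch of the deferred-preparation pattern, with stand-in helpers in place of the real ACL calls:

    #include <iostream>

    class LazyPreparedWorkload
    {
    public:
        void Execute() const
        {
            // Constants may not be populated until the first run, so the
            // one-off copy/prepare step is deferred to here.
            if (!m_Prepared)
            {
                std::cout << "copy constants, prepare(), free staging tensors\n";
                m_Prepared = true;
            }
            std::cout << "run()\n";
        }
    private:
        // mutable, as in the headers below: Execute() is const but must
        // record that preparation has happened.
        mutable bool m_Prepared = false;
    };

    int main()
    {
        LazyPreparedWorkload w;
        w.Execute(); // prepares, then runs
        w.Execute(); // runs only
    }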
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
index e833f2ac66..4e5c1cf5ed 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -41,10 +41,12 @@ public:
private:
std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
- std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
- std::unique_ptr<arm_compute::Tensor> m_BiasTensor;
-
+ mutable std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
+ mutable std::unique_ptr<arm_compute::Tensor> m_BiasTensor;
+ TensorInfo m_KernelTensorInfo;
+ TensorInfo m_BiasTensorInfo;
arm_compute::ConvolutionMethod m_ConvolutionMethod;
+ mutable bool prepared = false;
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index e2d0a8200f..00d9d3340e 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -141,10 +141,10 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 0b91eb37c2..7bb23f870b 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -70,15 +70,15 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
// Copy the weights' tensor into arm_compute tensor.
m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
- InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
-
+ m_WeightsTensorInfo = info.m_InputTensorInfos[1];
+ BuildArmComputeTensor(*m_WeightsTensor, m_WeightsTensorInfo);
+
if (m_Data.m_Parameters.m_BiasEnabled)
{
// Copy the biases tensor into arm_compute tensor.
m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
- InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
+ m_BiasesTensorInfo = info.m_InputTensorInfos[2];
+ BuildArmComputeTensor(*m_BiasesTensor, m_BiasesTensorInfo);
}
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
@@ -94,10 +94,10 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
@@ -107,14 +107,25 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
this->GetGuid());
// Force Compute Library to perform the necessary copying and reshaping.
- m_FullyConnectedLayer->prepare();
- FreeTensorIfUnused(m_WeightsTensor);
- FreeTensorIfUnused(m_BiasesTensor);
}
void NeonFullyConnectedWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFullyConnectedWorkload_Execute", this->GetGuid());
+ // The constant tensors may not be fully in place until the workload is Executed
+ if (!prepared)
+ {
+ InitializeArmComputeTensorData(*m_WeightsTensor, m_WeightsTensorInfo, m_Data.m_Inputs[1]);
+
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ InitializeArmComputeTensorData(*m_BiasesTensor, m_BiasesTensorInfo, m_Data.m_Inputs[2]);
+ }
+ m_FullyConnectedLayer->prepare();
+ FreeTensorIfUnused(m_WeightsTensor);
+ FreeTensorIfUnused(m_BiasesTensor);
+ prepared = true;
+ }
m_FullyConnectedLayer->run();
}
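Note: as in the convolution workload, prepare() and the FreeTensorIfUnused() calls move into the first Execute(). A hedged reconstruction of what FreeTensorIfUnused() does — the real helper lives in NeonWorkloadUtils.hpp and may differ in detail:

    #include <memory>

    template <typename Tensor>
    void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
    {
        // Once prepare() has taken its own reshaped copy, the staging
        // tensor can be released; a null pointer (e.g. bias disabled)
        // is skipped. is_used() is assumed from the ACL ITensor API.
        if (tensor != nullptr && !tensor->is_used())
        {
            tensor.reset(nullptr);
        }
    }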
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
index 944731d7bd..d5bb932c6e 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,13 +29,16 @@ class NeonFullyConnectedWorkload : public NeonBaseWorkload<FullyConnectedQueueDe
public:
NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+
virtual void Execute() const override;
private:
std::unique_ptr<arm_compute::IFunction> m_FullyConnectedLayer;
- std::unique_ptr<arm_compute::Tensor> m_WeightsTensor;
- std::unique_ptr<arm_compute::Tensor> m_BiasesTensor;
-
+ mutable std::unique_ptr<arm_compute::Tensor> m_WeightsTensor;
+ mutable std::unique_ptr<arm_compute::Tensor> m_BiasesTensor;
+ TensorInfo m_WeightsTensorInfo;
+ TensorInfo m_BiasesTensorInfo;
+ mutable bool prepared = false;
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index f9c3718e14..9f8bb9540e 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -58,6 +58,42 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
}
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
+ TensorInfo tensorInfo,
+ const ITensorHandle* handle)
+{
+ ARMNN_ASSERT(handle);
+
+ switch(tensorInfo.GetDataType())
+ {
+ case DataType::Float16:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::Half*>(handle->Map()));
+ break;
+ case DataType::Float32:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
+ break;
+ case DataType::QAsymmU8:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const uint8_t*>(handle->Map()));
+ break;
+ case DataType::QSymmS8:
+ case DataType::QAsymmS8:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const int8_t*>(handle->Map()));
+ break;
+ case DataType::Signed32:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const int32_t*>(handle->Map()));
+ break;
+ case DataType::QSymmS16:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const int16_t*>(handle->Map()));
+ break;
+ case DataType::BFloat16:
+ CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::BFloat16*>(handle->Map()));
+ break;
+ default:
+ // Throw exception; assertion not called in release build.
+ throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
+ }
+}
+
+inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstTensorHandle* handle)
{
ARMNN_ASSERT(handle);
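Note: the new overload above exists because a generic ITensorHandle, unlike the ConstTensorHandle taken by the older overload that follows it, does not carry an armnn::TensorInfo of its own — so the workload caches the info at construction and passes it in alongside the mapped handle. A stand-alone sketch of that shape (stand-in types, not the Arm NN ones):

    #include <cstdio>

    enum class DataType { Float32, Signed32 };

    struct Handle
    {
        const void* data;
        const void* Map() const { return data; } // stand-in for ITensorHandle::Map()
    };

    void Initialize(const Handle& handle, DataType type)
    {
        // The caller supplies the element type; the handle is just bytes.
        switch (type)
        {
            case DataType::Float32:
                std::printf("first float: %f\n", *static_cast<const float*>(handle.Map()));
                break;
            case DataType::Signed32:
                std::printf("first int: %d\n", *static_cast<const int*>(handle.Map()));
                break;
        }
    }

    int main()
    {
        float weights[] = { 0.5f, 1.5f };
        Handle h{ weights };
        Initialize(h, DataType::Float32);
        return 0;
    }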