author    Derek Lamberti <derek.lamberti@arm.com>    2019-12-10 21:12:59 +0000
committer Francis Murtagh <francis.murtagh@arm.com>  2019-12-31 10:02:58 +0000
commit    94a88d2b21d9ca3f42dc3435695be31b5591230b (patch)
tree      edf9381fa095561706353773c155a2c922a50dc8
parent    ff3c426ffd799abd66e4280da559384d86702721 (diff)
download  armnn-94a88d2b21d9ca3f42dc3435695be31b5591230b.tar.gz
IVGCVSW-4246 Clean build Layers with -Wextra
Change-Id: I649cd2304fb0040164763d31a12fc77c6c3bed87
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
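The warnings cleaned up here are the -Wunused-parameter diagnostics that -Wextra enables. Two techniques recur throughout the patch: where Arm NN owns the signature (for example the Graph& argument of Layer::CreateWorkload), the unused parameter is removed outright together with every override and call site; where the signature is fixed by an interface or a template helper (the ILayerVisitor::Accept overrides, TrueFunc/FalseFunc), the parameter is kept and marked with boost::ignore_unused. The sketch below is a minimal, self-contained illustration of both techniques under those assumptions; the names Base, Derived, AlwaysTrue and Helper are hypothetical and not part of this patch, and only Boost.Core is assumed.

    #include <boost/core/ignore_unused.hpp>

    struct Base
    {
        // Interface-fixed signature: overrides cannot simply drop the parameter.
        virtual int Run(int value, int unusedOption) const = 0;
        virtual ~Base() = default;
    };

    struct Derived : public Base
    {
        int Run(int value, int unusedOption) const override
        {
            boost::ignore_unused(unusedOption); // silences -Wunused-parameter under -Wextra
            return value;
        }
    };

    // Template helper mirroring TrueFunc: an entire parameter pack can be ignored.
    template <typename... Params>
    bool AlwaysTrue(Params&&... params)
    {
        boost::ignore_unused(params...);
        return true;
    }

    // Free function whose signature is ours to change: the unused parameter is
    // removed from the signature, as was done for the Graph& argument of
    // the CreateWorkload overloads in this patch.
    int Helper(int value)
    {
        return value * 2;
    }

    int main()
    {
        Derived d;
        return (d.Run(21, 7) + Helper(0) == 21 && AlwaysTrue(1, 2.0, "x")) ? 0 : 1;
    }

A third option, used in a few places in this patch, is to leave the parameter unnamed in the declaration itself (for example RegisterDebugCallback(const DebugCallbackFunction&)), which -Wextra also accepts.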
-rw-r--r--  src/armnn/ExecutionFrame.cpp  1
-rw-r--r--  src/armnn/ExecutionFrame.hpp  2
-rw-r--r--  src/armnn/Graph.cpp  1
-rw-r--r--  src/armnn/Layer.cpp  5
-rw-r--r--  src/armnn/Layer.hpp  20
-rw-r--r--  src/armnn/LayerSupportCommon.hpp  6
-rw-r--r--  src/armnn/LoadedNetwork.cpp  2
-rw-r--r--  src/armnn/Network.cpp  3
-rw-r--r--  src/armnn/layers/AbsLayer.cpp  5
-rw-r--r--  src/armnn/layers/AbsLayer.hpp  3
-rw-r--r--  src/armnn/layers/ActivationLayer.cpp  4
-rw-r--r--  src/armnn/layers/ActivationLayer.hpp  3
-rw-r--r--  src/armnn/layers/AdditionLayer.cpp  5
-rw-r--r--  src/armnn/layers/AdditionLayer.hpp  5
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.cpp  5
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.hpp  3
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp  5
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.hpp  3
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp  5
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.hpp  3
-rw-r--r--  src/armnn/layers/ComparisonLayer.cpp  5
-rw-r--r--  src/armnn/layers/ComparisonLayer.hpp  3
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp  4
-rw-r--r--  src/armnn/layers/ConcatLayer.hpp  3
-rw-r--r--  src/armnn/layers/ConstantLayer.cpp  7
-rw-r--r--  src/armnn/layers/ConstantLayer.hpp  3
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp  6
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.hpp  3
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp  6
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.hpp  3
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp  4
-rw-r--r--  src/armnn/layers/Convolution2dLayer.hpp  3
-rw-r--r--  src/armnn/layers/DebugLayer.cpp  8
-rw-r--r--  src/armnn/layers/DebugLayer.hpp  3
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.cpp  5
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.hpp  3
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp  5
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.hpp  3
-rw-r--r--  src/armnn/layers/DequantizeLayer.cpp  4
-rw-r--r--  src/armnn/layers/DequantizeLayer.hpp  3
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp  5
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.hpp  3
-rw-r--r--  src/armnn/layers/DivisionLayer.cpp  5
-rw-r--r--  src/armnn/layers/DivisionLayer.hpp  3
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.hpp  2
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp  6
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.hpp  3
-rw-r--r--  src/armnn/layers/FloorLayer.cpp  5
-rw-r--r--  src/armnn/layers/FloorLayer.hpp  3
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp  5
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.hpp  3
-rw-r--r--  src/armnn/layers/GatherLayer.cpp  5
-rw-r--r--  src/armnn/layers/GatherLayer.hpp  3
-rw-r--r--  src/armnn/layers/InputLayer.cpp  3
-rw-r--r--  src/armnn/layers/InputLayer.hpp  3
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.cpp  5
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.hpp  3
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.cpp  5
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.hpp  3
-rw-r--r--  src/armnn/layers/LayerWithParameters.hpp  4
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.cpp  4
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.hpp  3
-rw-r--r--  src/armnn/layers/LstmLayer.cpp  4
-rw-r--r--  src/armnn/layers/LstmLayer.hpp  3
-rw-r--r--  src/armnn/layers/MaximumLayer.cpp  5
-rw-r--r--  src/armnn/layers/MaximumLayer.hpp  3
-rw-r--r--  src/armnn/layers/MeanLayer.cpp  5
-rw-r--r--  src/armnn/layers/MeanLayer.hpp  3
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp  6
-rw-r--r--  src/armnn/layers/MemCopyLayer.hpp  3
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp  6
-rw-r--r--  src/armnn/layers/MemImportLayer.hpp  3
-rw-r--r--  src/armnn/layers/MergeLayer.cpp  4
-rw-r--r--  src/armnn/layers/MergeLayer.hpp  3
-rw-r--r--  src/armnn/layers/MinimumLayer.cpp  5
-rw-r--r--  src/armnn/layers/MinimumLayer.hpp  3
-rw-r--r--  src/armnn/layers/MultiplicationLayer.cpp  5
-rw-r--r--  src/armnn/layers/MultiplicationLayer.hpp  3
-rw-r--r--  src/armnn/layers/NormalizationLayer.cpp  4
-rw-r--r--  src/armnn/layers/NormalizationLayer.hpp  3
-rw-r--r--  src/armnn/layers/OutputLayer.cpp  5
-rw-r--r--  src/armnn/layers/OutputLayer.hpp  3
-rw-r--r--  src/armnn/layers/PadLayer.cpp  5
-rw-r--r--  src/armnn/layers/PadLayer.hpp  3
-rw-r--r--  src/armnn/layers/PermuteLayer.cpp  4
-rw-r--r--  src/armnn/layers/PermuteLayer.hpp  3
-rw-r--r--  src/armnn/layers/Pooling2dLayer.cpp  13
-rw-r--r--  src/armnn/layers/Pooling2dLayer.hpp  3
-rw-r--r--  src/armnn/layers/PreCompiledLayer.cpp  6
-rw-r--r--  src/armnn/layers/PreCompiledLayer.hpp  3
-rw-r--r--  src/armnn/layers/PreluLayer.cpp  5
-rw-r--r--  src/armnn/layers/PreluLayer.hpp  3
-rw-r--r--  src/armnn/layers/QuantizeLayer.cpp  5
-rw-r--r--  src/armnn/layers/QuantizeLayer.hpp  3
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp  5
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.hpp  3
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp  6
-rw-r--r--  src/armnn/layers/ReshapeLayer.hpp  3
-rw-r--r--  src/armnn/layers/ResizeLayer.cpp  5
-rw-r--r--  src/armnn/layers/ResizeLayer.hpp  3
-rw-r--r--  src/armnn/layers/RsqrtLayer.cpp  5
-rw-r--r--  src/armnn/layers/RsqrtLayer.hpp  3
-rw-r--r--  src/armnn/layers/SliceLayer.cpp  6
-rw-r--r--  src/armnn/layers/SliceLayer.hpp  3
-rw-r--r--  src/armnn/layers/SoftmaxLayer.cpp  4
-rw-r--r--  src/armnn/layers/SoftmaxLayer.hpp  3
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp  8
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.hpp  3
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp  8
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.hpp  3
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp  5
-rw-r--r--  src/armnn/layers/SplitterLayer.hpp  3
-rw-r--r--  src/armnn/layers/StackLayer.cpp  6
-rw-r--r--  src/armnn/layers/StackLayer.hpp  3
-rw-r--r--  src/armnn/layers/StandInLayer.cpp  4
-rw-r--r--  src/armnn/layers/StandInLayer.hpp  3
-rw-r--r--  src/armnn/layers/StridedSliceLayer.cpp  5
-rw-r--r--  src/armnn/layers/StridedSliceLayer.hpp  3
-rw-r--r--  src/armnn/layers/SubtractionLayer.cpp  5
-rw-r--r--  src/armnn/layers/SubtractionLayer.hpp  3
-rw-r--r--  src/armnn/layers/SwitchLayer.cpp  5
-rw-r--r--  src/armnn/layers/SwitchLayer.hpp  3
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp  5
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.hpp  3
-rw-r--r--  src/armnn/test/CreateWorkload.hpp  74
-rw-r--r--  src/backends/aclCommon/test/CreateWorkloadClNeon.hpp  4
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp  2
-rw-r--r--  src/backends/cl/test/ClOptimizedNetworkTests.cpp  2
-rw-r--r--  src/backends/neon/test/NeonOptimizedNetworkTests.cpp  2
-rw-r--r--  src/backends/reference/test/RefOptimizedNetworkTests.cpp  3
130 files changed, 272 insertions, 334 deletions
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 4d952b22d9..58005e951c 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -13,6 +13,7 @@ ExecutionFrame::ExecutionFrame() {}
IExecutionFrame* ExecutionFrame::ExecuteWorkloads(IExecutionFrame* previousFrame)
{
+ boost::ignore_unused(previousFrame);
for (auto& workload: m_WorkloadQueue)
{
workload->Execute();
diff --git a/src/armnn/ExecutionFrame.hpp b/src/armnn/ExecutionFrame.hpp
index c7e7780235..3661fc7d46 100644
--- a/src/armnn/ExecutionFrame.hpp
+++ b/src/armnn/ExecutionFrame.hpp
@@ -21,7 +21,7 @@ public:
virtual IExecutionFrame* ExecuteWorkloads(IExecutionFrame* previousFrame) = 0;
virtual void PostAllocationConfigure() {};
- virtual void RegisterDebugCallback(const DebugCallbackFunction& func) {};
+ virtual void RegisterDebugCallback(const DebugCallbackFunction&) {};
};
class ExecutionFrame: public IExecutionFrame
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 42a46f51cc..0ba627ca82 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -434,6 +434,7 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
{
+ boost::ignore_unused(layer);
BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 8350ea83c1..dee47f2cba 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -195,6 +195,7 @@ Layer::Layer(unsigned int numInputSlots,
, m_BackendId()
, m_Guid(profiling::ProfilingService::Instance().NextGuid())
{
+ boost::ignore_unused(layout);
m_InputSlots.reserve(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; ++i)
{
@@ -216,7 +217,7 @@ Layer::Layer(unsigned int numInputSlots,
{
}
-void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const
+void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
{
for (auto&& inputSlot : GetInputSlots())
{
@@ -227,7 +228,7 @@ void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Gr
}
}
-void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const
+void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
{
for (auto&& outputHandler : m_OutputHandlers)
{
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 3f00a20e65..e0a1ad66f2 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -265,7 +265,7 @@ public:
// Virtuals
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const = 0;
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& factory,
@@ -326,26 +326,26 @@ protected:
virtual ~Layer() = default;
template <typename QueueDescriptor>
- void CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info, const Graph& graph) const
+ void CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
{
WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
- CollectWorkloadInputs(dataCollector, graph);
+ CollectWorkloadInputs(dataCollector);
}
template <typename QueueDescriptor>
- void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info, const Graph& graph) const
+ void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
{
WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
- CollectWorkloadOutputs(dataCollector, graph);
+ CollectWorkloadOutputs(dataCollector);
}
/// Helper function to reduce duplication in *Layer::CreateWorkload.
template <typename QueueDescriptor>
- WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor, const Graph& graph) const
+ WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const
{
WorkloadInfo info;
- CollectQueueDescriptorInputs(descriptor, info, graph);
- CollectQueueDescriptorOutputs(descriptor, info, graph);
+ CollectQueueDescriptorInputs(descriptor, info);
+ CollectQueueDescriptorOutputs(descriptor, info);
return info;
}
@@ -357,8 +357,8 @@ protected:
virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
private:
- void CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const;
- void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const;
+ void CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const;
+ void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const;
protected:
std::vector<OutputHandler> m_OutputHandlers;
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index 3e2a1241db..8fca3d49d1 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -9,6 +9,8 @@
#include <armnn/Tensor.hpp>
#include <armnn/Optional.hpp>
+#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
@@ -52,12 +54,16 @@ bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
template<typename ... Params>
bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(reasonIfUnsupported);
+ boost::ignore_unused(params...);
return true;
}
template<typename ... Params>
bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(reasonIfUnsupported);
+ boost::ignore_unused(params...);
return false;
}
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index e915d36e7b..b830e6742e 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -217,7 +217,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
}
default:
{
- auto workload = layer->CreateWorkload(m_OptimizedNetwork->GetGraph(), workloadFactory);
+ auto workload = layer->CreateWorkload(workloadFactory);
if (!workload)
{
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3a2dc4e2e7..1406160914 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -545,7 +545,8 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
OutputSlot& slot,
TensorHandleFactoryRegistry& registry)
{
- return ITensorHandleFactory::DeferredFactoryId;
+ boost::ignore_unused(backends, slot, registry);
+ return ITensorHandleFactory::DeferredFactoryId;
}
// For all handle factories supported on the source backend, we wish to find the one which requires the fewest copies
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index f87706aec2..f67d965086 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -19,11 +19,10 @@ AbsLayer::AbsLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> AbsLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> AbsLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
AbsQueueDescriptor descriptor;
- return factory.CreateAbs(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateAbs(descriptor, PrepInfoAndDesc(descriptor));
}
AbsLayer* AbsLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 643cf4b629..6dc55b4542 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 6f80cce968..263fb72c20 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -17,10 +17,10 @@ ActivationLayer::ActivationLayer(const ActivationDescriptor& param, const char*
{
}
-std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ActivationQueueDescriptor descriptor;
- return factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor));
}
ActivationLayer* ActivationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index aac05f3e32..46845e2918 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -16,8 +16,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index 02ee306033..911d4e0488 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -19,11 +19,10 @@ AdditionLayer::AdditionLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
AdditionQueueDescriptor descriptor;
- return factory.CreateAddition(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateAddition(descriptor, PrepInfoAndDesc(descriptor));
}
AdditionLayer* AdditionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 1c2acabbb2..4c80e5c03a 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -17,13 +17,12 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
AdditionLayer* Clone(Graph& graph) const override;
-
+
void Accept(ILayerVisitor& visitor) const override;
protected:
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 35bf272e41..b67c42b2e4 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -21,11 +21,10 @@ ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* nam
{
}
-std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ArgMinMaxQueueDescriptor descriptor;
- return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor));
}
ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 43ea056c9e..2d7d223d7a 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 725dbd88b2..aed744714b 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -18,8 +18,7 @@ BatchNormalizationLayer::BatchNormalizationLayer(const armnn::BatchNormalization
{
}
-std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
BOOST_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
@@ -34,7 +33,7 @@ std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const Graph&
descriptor.m_Beta = m_Beta.get();
descriptor.m_Gamma = m_Gamma.get();
- return factory.CreateBatchNormalization(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateBatchNormalization(descriptor, PrepInfoAndDesc(descriptor));
}
BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 55390b7da5..14e6a17413 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -28,8 +28,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 335811186c..7e7045291c 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -28,12 +28,11 @@ BatchToSpaceNdLayer::BatchToSpaceNdLayer(const armnn::BatchToSpaceNdDescriptor&
{
}
-std::unique_ptr<IWorkload> BatchToSpaceNdLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> BatchToSpaceNdLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
BatchToSpaceNdQueueDescriptor descriptor;
- return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor));
}
BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index 21ed616a9e..5d568cb32a 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 75518e580e..1f6e35fa85 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -20,11 +20,10 @@ ComparisonLayer::ComparisonLayer(const ComparisonDescriptor& param, const char*
{
}
-std::unique_ptr<IWorkload> ComparisonLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ComparisonLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ComparisonQueueDescriptor descriptor;
- return factory.CreateComparison(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateComparison(descriptor, PrepInfoAndDesc(descriptor));
}
ComparisonLayer* ComparisonLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index bbc2b573bf..edc66b6cf7 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found
/// @param [in] factory The workload factory which will create the workload
/// @return A pointer to the created workload, or nullptr if not created
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer
/// @param [in] graph The graph into which this layer is being cloned
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 0f847eae32..317d61f1fa 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -19,7 +19,7 @@ ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConcatQueueDescriptor descriptor;
@@ -31,7 +31,7 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const Graph& graph, const
std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
}
- return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
}
template<typename FactoryType>
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 10a7fd8e74..0d540086d7 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
/// otherwise creates tensor handlers.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 31e1549e0e..136616c204 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -18,12 +18,11 @@ ConstantLayer::ConstantLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> ConstantLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ConstantLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConstantQueueDescriptor descriptor;
descriptor.m_LayerOutput = m_LayerOutput.get();
- return factory.CreateConstant(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateConstant(descriptor, PrepInfoAndDesc(descriptor));
}
ConstantLayer* ConstantLayer::Clone(Graph& graph) const
@@ -38,7 +37,7 @@ ConstantLayer* ConstantLayer::Clone(Graph& graph) const
std::vector<TensorShape> ConstantLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- return std::vector<TensorShape>({ m_LayerOutput->GetTensorInfo().GetShape() });
+ return std::vector<TensorShape>({ inputShapes[0] });
}
void ConstantLayer::ValidateTensorShapesFromInputs()
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 7a6cf9d10d..9525522b54 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -19,8 +19,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index d3c2462bf1..026e8de8b2 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -19,11 +19,10 @@ ConvertFp16ToFp32Layer::ConvertFp16ToFp32Layer(const char* name)
{
}
-std::unique_ptr<IWorkload> ConvertFp16ToFp32Layer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ConvertFp16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConvertFp16ToFp32QueueDescriptor descriptor;
- return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
}
ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const
@@ -49,6 +48,7 @@ void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
+ boost::ignore_unused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index 89046fc85e..03d7dfa568 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 068594bf99..90bd8948d0 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -18,11 +18,10 @@ ConvertFp32ToFp16Layer::ConvertFp32ToFp16Layer(const char* name)
{
}
-std::unique_ptr<IWorkload> ConvertFp32ToFp16Layer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ConvertFp32ToFp16Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConvertFp32ToFp16QueueDescriptor descriptor;
- return factory.CreateConvertFp32ToFp16(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateConvertFp32ToFp16(descriptor, PrepInfoAndDesc(descriptor));
}
ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
@@ -48,6 +47,7 @@ void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
{
// These conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
+ boost::ignore_unused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index 9f8b09bd5a..907a55f084 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index b205315b07..55a243aa0b 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -46,7 +46,7 @@ void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn
LayerWithParameters<Convolution2dDescriptor>::SerializeLayerParameters(fn);
}
-std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
@@ -60,7 +60,7 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const Graph& graph
BOOST_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
- return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}
Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 0e85b33355..bd30826823 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -25,8 +25,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 34912731cb..d0e0f037e2 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -9,6 +9,8 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
+#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
@@ -16,8 +18,7 @@ DebugLayer::DebugLayer(const char* name)
: Layer(1, 1, LayerType::Debug, name)
{}
-std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
const Layer& prevLayer = GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
@@ -26,7 +27,7 @@ std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const Graph& graph,
descriptor.m_LayerName = prevLayer.GetNameStr();
descriptor.m_SlotIndex = GetInputSlot(0).GetConnectedOutputSlot()->CalculateIndexOnOwner();
- return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor));
}
DebugLayer* DebugLayer::Clone(Graph& graph) const
@@ -52,6 +53,7 @@ void DebugLayer::ValidateTensorShapesFromInputs()
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
+ boost::ignore_unused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index 3bd5a3dae2..d50d6185a4 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 80992fa20f..bb74232690 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -22,14 +22,13 @@ DepthToSpaceLayer::DepthToSpaceLayer(const DepthToSpaceDescriptor& param, const
: LayerWithParameters(1, 1, LayerType::DepthToSpace, param, name)
{}
-std::unique_ptr<IWorkload> DepthToSpaceLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> DepthToSpaceLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
DepthToSpaceQueueDescriptor descriptor;
descriptor.m_Parameters.m_BlockSize = m_Param.m_BlockSize;
descriptor.m_Parameters.m_DataLayout = m_Param.m_DataLayout;
- return factory.CreateDepthToSpace(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateDepthToSpace(descriptor, PrepInfoAndDesc(descriptor));
}
DepthToSpaceLayer* DepthToSpaceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index cc5abe4cc9..53ef6e324e 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 306bce57fc..f37096ac18 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -48,8 +48,7 @@ void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFun
LayerWithParameters<DepthwiseConvolution2dDescriptor>::SerializeLayerParameters(fn);
}
-std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
@@ -63,7 +62,7 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const Gra
BOOST_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
- return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}
DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index f57591097c..67b6da23e3 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -24,8 +24,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 4dd30de77b..00a1d697b6 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -16,12 +16,12 @@ DequantizeLayer::DequantizeLayer(const char* name)
: Layer(1, 1, LayerType::Dequantize, name)
{}
-std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(const Graph& graph,
+std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(
const IWorkloadFactory& factory) const
{
DequantizeQueueDescriptor descriptor;
- return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor));
}
DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index 1340f96a27..c112b6026e 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 289cee0bd7..8749b33ba2 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -20,12 +20,11 @@ DetectionPostProcessLayer::DetectionPostProcessLayer(const DetectionPostProcessD
{
}
-std::unique_ptr<IWorkload> DetectionPostProcessLayer::CreateWorkload(const armnn::Graph& graph,
- const armnn::IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> DetectionPostProcessLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
{
DetectionPostProcessQueueDescriptor descriptor;
descriptor.m_Anchors = m_Anchors.get();
- return factory.CreateDetectionPostProcess(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateDetectionPostProcess(descriptor, PrepInfoAndDesc(descriptor));
}
DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index a1c499e793..a6eab116ff 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -23,8 +23,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index aa54ef664f..b27d894512 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -19,11 +19,10 @@ DivisionLayer::DivisionLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> DivisionLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> DivisionLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
DivisionQueueDescriptor descriptor;
- return factory.CreateDivision(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateDivision(descriptor, PrepInfoAndDesc(descriptor));
}
DivisionLayer* DivisionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index fdeadba78e..0a9b9fe821 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 5c2bbaa054..4f151b7f48 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -11,7 +11,7 @@ namespace armnn
{
/// NOTE: this is an abstract class to encapsulate the element wise operations, it does not implement:
-/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
+/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const IWorkloadFactory& factory) const = 0;
/// Layer* Clone(Graph& graph) const = 0;
class ElementwiseBaseLayer : public Layer
{
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index d61a80f748..90f8445472 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -18,11 +18,10 @@ FakeQuantizationLayer::FakeQuantizationLayer(const FakeQuantizationDescriptor& p
{
}
-std::unique_ptr<IWorkload> FakeQuantizationLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> FakeQuantizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
FakeQuantizationQueueDescriptor descriptor;
- return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor, graph) );
+ return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor) );
}
FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const
@@ -46,6 +45,7 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
+ boost::ignore_unused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 81e5444d88..36c360f728 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 34ad7f628e..148543cf62 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -18,11 +18,10 @@ FloorLayer::FloorLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> FloorLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> FloorLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
FloorQueueDescriptor descriptor;
- return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor));
}
FloorLayer* FloorLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 9bc6de7bf3..e5b30d1ffb 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 98b81fa6a8..6b36bad713 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -19,8 +19,7 @@ FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param,
{
}
-std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
@@ -33,7 +32,7 @@ std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const Graph& grap
BOOST_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
- return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
}
FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index d756d433c1..7f03cc2ffe 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -24,8 +24,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index e3ce6b3dad..c276d8258f 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -18,11 +18,10 @@ GatherLayer::GatherLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const armnn::Graph& graph,
- const armnn::IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
{
GatherQueueDescriptor descriptor;
- return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor));
}
GatherLayer* GatherLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 9acec528d8..08629d53c8 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index f56c5b47f0..e0c2544236 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -17,8 +17,9 @@ InputLayer::InputLayer(LayerBindingId id, const char* name)
{
}
-std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
+ boost::ignore_unused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index faa48fb2e9..64138fd3cf 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index fc3044af50..9e0212f226 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -18,11 +18,10 @@ InstanceNormalizationLayer::InstanceNormalizationLayer(const InstanceNormalizati
{
}
-std::unique_ptr<IWorkload> InstanceNormalizationLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> InstanceNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
InstanceNormalizationQueueDescriptor descriptor;
- return factory.CreateInstanceNormalization(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateInstanceNormalization(descriptor, PrepInfoAndDesc(descriptor));
}
InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index 9ba56731c6..2b59b0d23a 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 88c3992d42..3d9dc538f5 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -18,11 +18,10 @@ L2NormalizationLayer::L2NormalizationLayer(const L2NormalizationDescriptor& para
{
}
-std::unique_ptr<IWorkload> L2NormalizationLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> L2NormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
L2NormalizationQueueDescriptor descriptor;
- return factory.CreateL2Normalization(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateL2Normalization(descriptor, PrepInfoAndDesc(descriptor));
}
L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 0c4b24394d..be506b7d1a 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp
index cce9ca209f..3f3bdd8050 100644
--- a/src/armnn/layers/LayerWithParameters.hpp
+++ b/src/armnn/layers/LayerWithParameters.hpp
@@ -40,10 +40,10 @@ protected:
/// Helper function to reduce duplication in *Layer::CreateWorkload.
template <typename QueueDescriptor>
- WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor, const Graph& graph) const
+ WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const
{
descriptor.m_Parameters = m_Param;
- return Layer::PrepInfoAndDesc(descriptor, graph);
+ return Layer::PrepInfoAndDesc(descriptor);
}
/// The parameters for the layer (not including tensor-valued weights etc.).
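
With the Graph parameter gone from PrepInfoAndDesc, every concrete layer's CreateWorkload collapses to the same shape. A minimal sketch of that shape as a fragment (FooLayer, FooQueueDescriptor and CreateFoo are placeholder names, not real Arm NN identifiers, and the surrounding class and include context is assumed):

    std::unique_ptr<IWorkload> FooLayer::CreateWorkload(const IWorkloadFactory& factory) const
    {
        FooQueueDescriptor descriptor;
        // PrepInfoAndDesc copies m_Param into descriptor.m_Parameters and builds the
        // WorkloadInfo from the layer's own input/output slots, so no Graph is needed.
        return factory.CreateFoo(descriptor, PrepInfoAndDesc(descriptor));
    }
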
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 6ca15b2d6f..24b6fde339 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -18,10 +18,10 @@ namespace armnn
LogSoftmaxLayer::LogSoftmaxLayer(const LogSoftmaxDescriptor &param, const char* name)
: LayerWithParameters(1, 1, LayerType::LogSoftmax, param, name) {}
-std::unique_ptr<IWorkload> LogSoftmaxLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> LogSoftmaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
LogSoftmaxQueueDescriptor descriptor;
- return factory.CreateLogSoftmax(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateLogSoftmax(descriptor, PrepInfoAndDesc(descriptor));
}
LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index 13da542139..732e47e4cf 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 4012839dfe..1aa10ea030 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -18,7 +18,7 @@ LstmLayer::LstmLayer(const LstmDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> LstmLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> LstmLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
LstmQueueDescriptor descriptor;
@@ -68,7 +68,7 @@ std::unique_ptr<IWorkload> LstmLayer::CreateWorkload(const Graph& graph, const I
descriptor.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights.get();
}
- return factory.CreateLstm(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateLstm(descriptor, PrepInfoAndDesc(descriptor));
}
LstmLayer* LstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 584d8e2547..21421f220f 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -88,8 +88,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 5f69a470be..bfc42e9ac4 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -18,11 +18,10 @@ MaximumLayer::MaximumLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
MaximumQueueDescriptor descriptor;
- return factory.CreateMaximum(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateMaximum(descriptor, PrepInfoAndDesc(descriptor));
}
MaximumLayer* MaximumLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 9534b07071..c90a30e72e 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index c925a3e18f..30b88fa1b9 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -19,14 +19,13 @@ MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
: LayerWithParameters(1, 1, LayerType::Mean, param, name)
{}
-std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
- const armnn::IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
{
MeanQueueDescriptor descriptor;
descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
- return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor));
}
MeanLayer* MeanLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index d70302fa44..b7c5ed3720 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 6f3f55955e..231b28548f 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -24,12 +24,13 @@ MemCopyLayer* MemCopyLayer::Clone(Graph& graph) const
return CloneBase<MemCopyLayer>(graph, GetName());
}
-std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
+ boost::ignore_unused(factory);
MemCopyQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
- return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
void MemCopyLayer::ValidateTensorShapesFromInputs()
@@ -48,6 +49,7 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
+ boost::ignore_unused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 2c696a433c..d466d0e1c8 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 7a922f5a7c..3b0e6d295b 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -24,12 +24,13 @@ MemImportLayer* MemImportLayer::Clone(Graph& graph) const
return CloneBase<MemImportLayer>(graph, GetName());
}
-std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
+ boost::ignore_unused(factory);
MemImportQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
- return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
void MemImportLayer::ValidateTensorShapesFromInputs()
@@ -48,6 +49,7 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
+ boost::ignore_unused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 2d02c1fb41..452e5e38f9 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index 1d4dc49379..ce75950be2 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -16,9 +16,9 @@ MergeLayer::MergeLayer(const char* name)
: Layer(2, 1, LayerType::Merge, name)
{}
-std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
+ boost::ignore_unused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 66664ca952..145284475c 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 810ee182a1..fb54c3d7ae 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -19,11 +19,10 @@ MinimumLayer::MinimumLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> MinimumLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MinimumLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
MinimumQueueDescriptor descriptor;
- return factory.CreateMinimum(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateMinimum(descriptor, PrepInfoAndDesc(descriptor));
}
MinimumLayer* MinimumLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 4a7bc177be..03ca031828 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 4556a874d4..dd0303a567 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -19,11 +19,10 @@ MultiplicationLayer::MultiplicationLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
MultiplicationQueueDescriptor descriptor;
- return factory.CreateMultiplication(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateMultiplication(descriptor, PrepInfoAndDesc(descriptor));
}
MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index b9fd35a0e2..752765bfdc 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index f5b9284f31..09f8a0d00e 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -18,10 +18,10 @@ NormalizationLayer::NormalizationLayer(const NormalizationDescriptor& param, con
{
}
-std::unique_ptr<IWorkload> NormalizationLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> NormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
NormalizationQueueDescriptor descriptor;
- return factory.CreateNormalization(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateNormalization(descriptor, PrepInfoAndDesc(descriptor));
}
NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 0027acc2e5..8ba3f53d48 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 9b4cb70032..4239323635 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -9,6 +9,8 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
+#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
@@ -17,8 +19,9 @@ OutputLayer::OutputLayer(LayerBindingId id, const char* name)
{
}
-std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
+ boost::ignore_unused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index c9615cca66..8994556528 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
/// otherwise creates tensor handlers by default. Ignores parameters for Output type.
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 9e08da49c0..4fab88e615 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -19,13 +19,12 @@ PadLayer::PadLayer(const armnn::PadDescriptor& param, const char* name)
: LayerWithParameters(1, 1, LayerType::Pad, param, name)
{}
-std::unique_ptr<IWorkload> PadLayer::CreateWorkload(const armnn::Graph& graph,
- const armnn::IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> PadLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
{
PadQueueDescriptor descriptor;
descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
- return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor));
}
PadLayer* PadLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index 8174fa884f..f3cfb000bf 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 459a755117..0fc3ce4bf6 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -22,10 +22,10 @@ PermuteLayer::PermuteLayer(const PermuteDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> PermuteLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> PermuteLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
PermuteQueueDescriptor descriptor;
- return factory.CreatePermute(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreatePermute(descriptor, PrepInfoAndDesc(descriptor));
}
PermuteLayer* PermuteLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index 8413322373..4984cf26ee 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 8f5ccb9215..a3c2425097 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -24,10 +24,10 @@ Pooling2dLayer::Pooling2dLayer(const Pooling2dDescriptor& param, const char* nam
{
}
-std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
Pooling2dQueueDescriptor descriptor;
- return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor));
}
Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
@@ -57,8 +57,7 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
BOOST_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
"Stride can only be zero when performing global pooling");
- auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto padMethod,
- auto outputShapeRounding)
+ auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
{
unsigned int readSize = inSize + lowPad + highPad - poolSize;
float div = static_cast<float>(readSize) / static_cast<float>(stride);
@@ -87,9 +86,9 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
};
outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
- m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding);
- outHeight= CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
- m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding);
+ m_Param.m_OutputShapeRounding);
+ outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
+ m_Param.m_OutputShapeRounding);
}
unsigned int outChannels = inChannels;
unsigned int outBatchSize = inBatchSize;
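
The CalcSize change follows the same rule: the padding-method argument was never read inside the lambda, so it is dropped from the parameter list and from both call sites. A tiny stand-alone rendering of the arithmetic that survives, with invented example values (the rounding policy selected by m_OutputShapeRounding is simplified away here):

    #include <cstdio>

    int main()
    {
        // Same core arithmetic as CalcSize: readSize = in + padLow + padHigh - pool,
        // then divide by the stride; the real code rounds 'div' before adding one.
        auto calcSize = [](unsigned inSize, unsigned lowPad, unsigned highPad,
                           unsigned poolSize, unsigned stride)
        {
            unsigned readSize = inSize + lowPad + highPad - poolSize;
            return readSize / stride + 1;
        };

        std::printf("%u\n", calcSize(7, 1, 1, 3, 2)); // 7 + 1 + 1 - 3 = 6; 6 / 2 + 1 = 4
        return 0;
    }
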
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index f70ae78aa9..2563eb130b 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index bd93743eab..00a316c5c0 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -28,12 +28,11 @@ PreCompiledLayer* PreCompiledLayer::Clone(Graph& graph) const
return clone;
}
-std::unique_ptr<IWorkload> PreCompiledLayer::CreateWorkload(const armnn::Graph& graph,
- const armnn::IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> PreCompiledLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
{
PreCompiledQueueDescriptor descriptor;
descriptor.m_PreCompiledObject = m_PreCompiledObject.get();
- return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
}
void PreCompiledLayer::ValidateTensorShapesFromInputs()
@@ -49,6 +48,7 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
+ boost::ignore_unused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index ec5a9d6478..1a87f61e5b 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -25,8 +25,7 @@ public:
PreCompiledLayer(const PreCompiledDescriptor& param, const char* name);
~PreCompiledLayer();
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
PreCompiledLayer* Clone(Graph &graph) const override;
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 6040248391..d9e59224a0 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -18,12 +18,11 @@ PreluLayer::PreluLayer(const char* name)
: Layer(2, 1, LayerType::Prelu, name)
{}
-std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
PreluQueueDescriptor descriptor;
- return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor));
}
PreluLayer* PreluLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 54e57b22c1..2f2704bf73 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index d5d76e2585..701041f4b3 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -16,11 +16,10 @@ QuantizeLayer::QuantizeLayer(const char* name)
: Layer(1, 1, LayerType::Quantize, name)
{}
-std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
QuantizeQueueDescriptor descriptor;
- WorkloadInfo info = PrepInfoAndDesc(descriptor, graph);
+ WorkloadInfo info = PrepInfoAndDesc(descriptor);
return factory.CreateQuantize(descriptor, info);
}
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index fabb4492c5..32cd53f810 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -17,8 +17,7 @@ class ILayerVisitor;
class QuantizeLayer : public Layer
{
public:
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
Layer* Clone(Graph& graph) const override;
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 1d8540d563..1c22ab4c92 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -18,8 +18,7 @@ QuantizedLstmLayer::QuantizedLstmLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> QuantizedLstmLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> QuantizedLstmLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
QuantizedLstmQueueDescriptor descriptor;
@@ -39,7 +38,7 @@ std::unique_ptr<IWorkload> QuantizedLstmLayer::CreateWorkload(const Graph& graph
descriptor.m_CellBias = m_QuantizedLstmParameters.m_CellBias.get();
descriptor.m_OutputGateBias = m_QuantizedLstmParameters.m_OutputGateBias.get();
- return factory.CreateQuantizedLstm(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateQuantizedLstm(descriptor, PrepInfoAndDesc(descriptor));
}
QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 4602f71114..9e0186fc71 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -52,8 +52,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index cfce712151..3a952583e6 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -18,11 +18,10 @@ ReshapeLayer::ReshapeLayer(const ReshapeDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> ReshapeLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ReshapeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ReshapeQueueDescriptor descriptor;
- return factory.CreateReshape(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateReshape(descriptor, PrepInfoAndDesc(descriptor));
}
ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
@@ -32,6 +31,7 @@ ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
+ boost::ignore_unused(inputShapes);
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index c4743d17df..4fd5c3e11d 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 50d5f243ff..e341191de1 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -23,11 +23,10 @@ ResizeLayer::ResizeLayer(const ResizeDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ResizeQueueDescriptor descriptor;
- return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor));
}
ResizeLayer* ResizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index abf5c60efa..9ad4910cec 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index a21564ed7f..6ff7372aa7 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -19,11 +19,10 @@ RsqrtLayer::RsqrtLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> RsqrtLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> RsqrtLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
RsqrtQueueDescriptor descriptor;
- return factory.CreateRsqrt(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateRsqrt(descriptor, PrepInfoAndDesc(descriptor));
}
RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index 526a6642ca..1e51cc04ad 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index 8ea5fd8f25..e39caa5db1 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -23,11 +23,10 @@ SliceLayer::SliceLayer(const SliceDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> SliceLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SliceLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SliceQueueDescriptor descriptor;
- return factory.CreateSlice(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSlice(descriptor, PrepInfoAndDesc(descriptor));
}
SliceLayer* SliceLayer::Clone(Graph& graph) const
@@ -51,6 +50,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
+ boost::ignore_unused(inputShapes);
BOOST_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index 38f0747f05..abfe472194 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 90891b417a..cb70bbc20d 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -18,10 +18,10 @@ SoftmaxLayer::SoftmaxLayer(const SoftmaxDescriptor &param, const char* name)
{
}
-std::unique_ptr<IWorkload> SoftmaxLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SoftmaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SoftmaxQueueDescriptor descriptor;
- return factory.CreateSoftmax(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSoftmax(descriptor, PrepInfoAndDesc(descriptor));
}
SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index 82c2099ea3..839170e9b0 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index 5002160ff7..d38187c532 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -24,18 +24,18 @@ SpaceToBatchNdLayer::SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, c
: LayerWithParameters(1, 1, LayerType::SpaceToBatchNd, param, name)
{}
-std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- SpaceToBatchNdQueueDescriptor descriptor;
+ SpaceToBatchNdQueueDescriptor descriptor;
descriptor.m_Parameters.m_BlockShape = m_Param.m_BlockShape;
descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
- return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor));
}
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
+ boost::ignore_unused(graph);
return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index f6616bcd21..cb8162f7cd 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index befb940b4a..f8a6eb3ed8 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -15,6 +15,8 @@
#include <numeric>
+#include <boost/core/ignore_unused.hpp>
+
using namespace armnnUtils;
namespace armnn
@@ -24,18 +26,18 @@ SpaceToDepthLayer::SpaceToDepthLayer(const SpaceToDepthDescriptor param, const c
: LayerWithParameters(1, 1, LayerType::SpaceToDepth, param, name)
{}
-std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SpaceToDepthQueueDescriptor descriptor;
descriptor.m_Parameters.m_BlockSize = m_Param.m_BlockSize;
descriptor.m_Parameters.m_DataLayout = m_Param.m_DataLayout;
- return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor));
}
SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
{
+ boost::ignore_unused(graph);
return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index b83a9e0170..799c36754d 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index e8452462f3..84a598c847 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -18,7 +18,7 @@ SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SplitterQueueDescriptor descriptor;
@@ -29,7 +29,7 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const Graph& graph, con
std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
}
- return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor));
}
template<typename FactoryType>
@@ -127,6 +127,7 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
+ boost::ignore_unused(inputShapes);
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 26d5b76a2d..a6c8cbe4d7 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
/// otherwise creates tensor handlers.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 43e0ac3134..1a060f93c8 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -19,10 +19,10 @@ StackLayer::StackLayer(const StackDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
StackQueueDescriptor descriptor;
- return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor));
}
StackLayer* StackLayer::Clone(Graph& graph) const
@@ -32,6 +32,8 @@ StackLayer* StackLayer::Clone(Graph& graph) const
std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
+ boost::ignore_unused(inputShapes);
+
const TensorShape& inputShape = m_Param.m_InputShape;
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
const unsigned int axis = m_Param.m_Axis;
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 6c845972d0..5ec2e8a55d 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index 7d693bfffb..d0fc325caa 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -14,8 +14,9 @@ StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
{
}
-std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
+ boost::ignore_unused(factory);
// This throws in the event that it's called. We would expect that any backend that
// "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
// during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -29,6 +30,7 @@ StandInLayer* StandInLayer::Clone(Graph& graph) const
std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
+ boost::ignore_unused(inputShapes);
throw Exception("Stand in layer does not support infering output shapes");
}
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index 9fe1773a27..d087b939bb 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return Does not return anything. Throws Exception if called.
- virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index 03d451c69a..b9c337188c 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -19,8 +19,7 @@ StridedSliceLayer::StridedSliceLayer(const armnn::StridedSliceDescriptor& param,
{
}
-std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
StridedSliceQueueDescriptor descriptor;
@@ -35,7 +34,7 @@ std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const Graph& graph,
descriptor.m_Parameters.m_NewAxisMask = m_Param.m_NewAxisMask;
descriptor.m_Parameters.m_ShrinkAxisMask = m_Param.m_ShrinkAxisMask;
- return factory.CreateStridedSlice(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateStridedSlice(descriptor, PrepInfoAndDesc(descriptor));
}
StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 97b8ee2a07..07219697d5 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 6becfdcd5d..0797742aea 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -19,11 +19,10 @@ SubtractionLayer::SubtractionLayer(const char* name)
{
}
-std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SubtractionQueueDescriptor descriptor;
- return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor));
}
SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 557da65db4..21619f890d 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -18,8 +18,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 4f0eb9b703..4cacda6318 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -16,11 +16,10 @@ SwitchLayer::SwitchLayer(const char* name)
: Layer(2, 2, LayerType::Switch, name)
{}
-std::unique_ptr<IWorkload> SwitchLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> SwitchLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SwitchQueueDescriptor descriptor;
- return factory.CreateSwitch(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateSwitch(descriptor, PrepInfoAndDesc(descriptor));
}
SwitchLayer* SwitchLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index bfda8c2b1b..2a6a09db24 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -17,8 +17,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 1110e04991..dca77b4c09 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -24,8 +24,7 @@ TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolut
{
}
-std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
@@ -38,7 +37,7 @@ std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const Gra
descriptor.m_Bias = m_Bias.get();
}
- return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph));
+ return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}
TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
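// Nothing changes on the factory side: CreateTransposeConvolution2d still receives the
// fully populated queue descriptor (the convolution parameters plus m_Weight and, when
// bias is enabled, m_Bias as set above) together with the WorkloadInfo; only how the
// WorkloadInfo is assembled differs. A small hypothetical helper a backend might use
// when consuming such a descriptor (sketch only, the name is assumed):
bool TransposeConvHasBias(const armnn::TransposeConvolution2dQueueDescriptor& descriptor)
{
    // m_Bias is only filled in by the layer when its descriptor enables a bias.
    return descriptor.m_Parameters.m_BiasEnabled && descriptor.m_Bias != nullptr;
}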
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 4dc4644a3c..24c0e494d5 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -24,8 +24,7 @@ public:
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 0537877b99..4e7967bf40 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -30,9 +30,9 @@ using namespace std;
// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
template<typename Workload>
-std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const IWorkloadFactory& factory)
+std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, const IWorkloadFactory& factory)
{
- std::unique_ptr<IWorkload> workload = layer.CreateWorkload(graph, factory);
+ std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory);
BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
"Cannot convert to derived class");
std::string reasonIfUnsupported;
@@ -84,7 +84,7 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, factory);
ActivationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -120,7 +120,7 @@ std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFact
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
DescriptorType queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
@@ -147,7 +147,7 @@ std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloa
Connect(layer, output, tensorInfo, 0, 0);
CreateTensorHandles(graph, factory);
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
DescriptorType queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -199,7 +199,7 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkload
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -252,7 +252,7 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
@@ -358,7 +358,7 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
CreateTensorHandles(graph, factory);
// make the workload and check it
- auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, factory);
LstmQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
@@ -487,7 +487,7 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
CreateTensorHandles(graph, factory);
// Create workload and check layer support
- auto workload = MakeAndCheckWorkload<QuantizedLstmWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<QuantizedLstmWorkload>(*layer, factory);
QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
// Validate input/output sizes
@@ -548,7 +548,7 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
@@ -605,7 +605,7 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, factory);
DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
@@ -654,7 +654,7 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
@@ -703,7 +703,7 @@ std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IW
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, factory);
NormalizationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
@@ -755,7 +755,7 @@ std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadF
CreateTensorHandles(graph, factory);
// Make the workload and checks it
- auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, factory);
Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
@@ -801,7 +801,7 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
CreateTensorHandles(graph, factory);
// Make the workload and checks it.
- auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, factory);
SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -850,7 +850,7 @@ std::unique_ptr<SplitterWorkload>
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, factory);
SplitterQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -927,9 +927,9 @@ std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
CreateTensorHandles(graph, factory);
BOOST_TEST_CHECKPOINT("created tensor handles");
- auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
+ auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
BOOST_TEST_CHECKPOINT("created splitter workload");
- auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, graph, factory);
+ auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
BOOST_TEST_CHECKPOINT("created concat workload");
return {std::move(workloadSplitter), std::move(workloadConcat)};
@@ -994,11 +994,11 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
CreateTensorHandles(graph, factory);
- auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
- auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, graph, factory);
- auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, graph, factory);
- auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, graph, factory);
- auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, graph, factory);
+ auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
+ auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, factory);
+ auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, factory);
+ auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, factory);
+ auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, factory);
wlSplitter = std::move(workloadSplitter);
wlActiv0_0 = std::move(workloadActiv0_0);
@@ -1047,7 +1047,7 @@ std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloa
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, factory);
auto queueDescriptor = workload->GetData();
BOOST_CHECK(queueDescriptor.m_Inputs.size() == 1);
@@ -1077,7 +1077,7 @@ std::unique_ptr<RsqrtWorkload> CreateRsqrtWorkloadTest(armnn::IWorkloadFactory&
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<RsqrtWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<RsqrtWorkload>(*layer, factory);
RsqrtQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1106,7 +1106,7 @@ std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, factory);
BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1142,7 +1142,7 @@ std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, factory);
L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
@@ -1175,7 +1175,7 @@ std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFacto
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, factory);
ReshapeQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1204,7 +1204,7 @@ std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32Workloa
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, factory);
ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1233,7 +1233,7 @@ std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16Workloa
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, factory);
ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1264,7 +1264,7 @@ std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& fa
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, factory);
MeanQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
@@ -1312,7 +1312,7 @@ std::unique_ptr<ConcatWorkload> CreateConcatWorkloadTest(armnn::IWorkloadFactory
CreateTensorHandles(graph, factory);
BOOST_TEST_CHECKPOINT("created tensor handles");
- auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, graph, factory);
+ auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
BOOST_TEST_CHECKPOINT("created concat workload");
return workloadConcat;
@@ -1438,7 +1438,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
CreateTensorHandles(optimisedGraph, factory);
// Make the workload and check it.
- auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, optimisedGraph, factory);
+ auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, factory);
PreCompiledQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1470,7 +1470,7 @@ std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFac
CreateTensorHandles(graph, factory);
BOOST_TEST_CHECKPOINT("created tensor handles");
- auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, graph, factory);
+ auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, factory);
BOOST_TEST_CHECKPOINT("created Constant workload");
return workloadConstant;
@@ -1506,7 +1506,7 @@ std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory&
CreateTensorHandles(graph, factory);
// Makes the workload and checks it
- auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, factory);
PreluQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
@@ -1538,7 +1538,7 @@ std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWor
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, factory);
SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
@@ -1585,7 +1585,7 @@ std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory&
CreateTensorHandles(graph, factory);
- auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, graph, factory);
+ auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, factory);
StackQueueDescriptor queueDescriptor = stackWorkload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == numInputs);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
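// The Graph is still needed to build and connect the test network, but it no longer
// has to be threaded through MakeAndCheckWorkload. A hypothetical test following the
// pattern used throughout this file (CreateSimpleActivationWorkloadTest is a made-up
// name; Connect and CreateTensorHandles are the helpers already used above):
template <typename ActivationWorkloadType>
std::unique_ptr<ActivationWorkloadType> CreateSimpleActivationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                           armnn::Graph& graph)
{
    armnn::ActivationDescriptor activationDesc;   // default parameters are enough for a sketch
    auto* const layer  = graph.AddLayer<armnn::ActivationLayer>(activationDesc, "activation");
    auto* const input  = graph.AddLayer<armnn::InputLayer>(0, "input");
    auto* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo tensorInfo({1, 1}, armnn::DataType::Float32);
    Connect(input, layer, tensorInfo, 0, 0);
    Connect(layer, output, tensorInfo, 0, 0);
    CreateTensorHandles(graph, factory);

    // Only the layer and the factory are passed; the graph argument is gone.
    return MakeAndCheckWorkload<ActivationWorkloadType>(*layer, factory);
}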
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
index 53d4dc9154..83cec2a746 100644
--- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -87,8 +87,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory)
output->CreateTensorHandles(registry, refFactory);
// make the workloads and check them
- auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, graph, factory);
- auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, graph, refFactory);
+ auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
+ auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cbe74b856f..2df4c8f903 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -263,7 +263,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
BOOST_CHECK_NO_THROW(
- layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+ layer->CreateWorkload(fact));
}
}
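// With the graph argument gone, the per-backend "optimize and validate workloads"
// tests (this file and the Cl, Neon and Ref variants below) all collapse to the same
// simple loop; the factory type and optNet vary per backend, the Ref factory is shown
// here only as an example:
armnn::RefWorkloadFactory fact;
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
    BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
}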
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index f8c1a327ef..824375ef86 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -40,7 +40,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
BOOST_CHECK_NO_THROW(
- layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+ layer->CreateWorkload(fact));
}
}
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 09e231bfd7..093b5f29b4 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -40,7 +40,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
BOOST_CHECK_NO_THROW(
- layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+ layer->CreateWorkload(fact));
}
}
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index cd3708cc4f..09a7914859 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -78,8 +78,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
armnn::RefWorkloadFactory fact;
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
- BOOST_CHECK_NO_THROW(
- layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+ BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
}
}