author     Derek Lamberti <derek.lamberti@arm.com>    2019-12-10 22:07:09 +0000
committer  Derek Lamberti <derek.lamberti@arm.com>    2020-01-02 15:16:28 +0000
commit     901ea11e3da6b97df52bdc2a547990402e920ede (patch)
tree       3c93b40643d274d6bb77abfcde99677b80d4513a /src/backends/backendsCommon
parent     b4f312cefc70df2cdacd5a52a522fc8b027cc6e8 (diff)
download   armnn-901ea11e3da6b97df52bdc2a547990402e920ede.tar.gz
IVGCVSW-4246 Clean build of backends with -Wextra
Change-Id: I9e8d5576b3ec04c871785d5f2f9545bf1136e59b
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
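Context for the change: -Wextra enables -Wunused-parameter, and the default/stub implementations touched here intentionally ignore their arguments, so every named-but-unused parameter would otherwise produce a warning. The bulk of the diff simply comments out the unused parameter names. A minimal sketch of that pattern follows; StubLayerSupport and the simplified TensorInfo/std::string* types are illustrative stand-ins, not the real Arm NN declarations.

// Minimal sketch of the main pattern in this change, built with -Wall -Wextra.
// StubLayerSupport and TensorInfo are simplified stand-ins, not Arm NN types.

#include <string>

struct TensorInfo {};

class StubLayerSupport
{
public:
    // Commenting out the parameter name silences -Wunused-parameter while the
    // name stays visible in the signature for documentation purposes.
    bool IsAbsSupported(const TensorInfo& /*input*/,
                        const TensorInfo& /*output*/,
                        std::string* /*reasonIfUnsupported*/) const
    {
        return false; // base-class default: not supported
    }
};

int main()
{
    StubLayerSupport support;
    return support.IsAbsSupported(TensorInfo{}, TensorInfo{}, nullptr) ? 1 : 0;
}

Keeping the name inside a comment, rather than deleting it outright, preserves the self-documenting signature, which is why the diff prefers /*name*/ over dropping the parameter name entirely.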
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/IBackendInternal.cpp               |   6
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp               | 399
-rw-r--r--  src/backends/backendsCommon/LayerSupportRules.hpp              |   2
-rw-r--r--  src/backends/backendsCommon/MakeWorkloadHelper.hpp             |   3
-rw-r--r--  src/backends/backendsCommon/Workload.hpp                       |   2
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                   |   2
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                | 230
-rw-r--r--  src/backends/backendsCommon/WorkloadFactoryBase.hpp            | 238
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  |   1
-rw-r--r--  src/backends/backendsCommon/test/MockBackend.cpp               |   2
-rw-r--r--  src/backends/backendsCommon/test/MockBackend.hpp               |  28
-rw-r--r--  src/backends/backendsCommon/test/TestDynamicBackend.cpp        |   3
12 files changed, 461 insertions, 455 deletions
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index d1422d1c22..ad09730e33 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -10,7 +10,7 @@ namespace armnn
ARMNN_NO_DEPRECATE_WARN_BEGIN
IBackendInternal::ISubGraphConverterPtr IBackendInternal::CreateSubGraphConverter(
- const std::shared_ptr<SubGraph>& subGraph) const
+ const std::shared_ptr<SubGraph>& /*subGraph*/) const
{
return ISubGraphConverterPtr{};
}
@@ -20,7 +20,7 @@ IBackendInternal::Optimizations IBackendInternal::GetOptimizations() const
return Optimizations{};
}
-IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& subGraph,
+IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& /*subGraph*/,
bool& optimizationAttempted) const
{
optimizationAttempted = false;
@@ -34,7 +34,7 @@ IMemoryManagerUniquePtr IBackendInternal::CreateMemoryManager() const
}
IBackendInternal::IWorkloadFactoryPtr IBackendInternal::CreateWorkloadFactory(
- class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
+ class TensorHandleFactoryRegistry& /*tensorHandleFactoryRegistry*/) const
{
return IWorkloadFactoryPtr{};
}
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 00f1d0223d..8332774202 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -34,295 +34,292 @@ bool DefaultLayerSupport(const char* func,
namespace armnn
{
-bool LayerSupportBase::IsAbsSupported(const TensorInfo &input,
- const TensorInfo &output,
+bool LayerSupportBase::IsAbsSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsActivationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
+bool LayerSupportBase::IsActivationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const ActivationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsAdditionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
- const armnn::ArgMinMaxDescriptor& descriptor,
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &/*input*/,
+ const armnn::TensorInfo &/*output*/,
+ const armnn::ArgMinMaxDescriptor& /*descriptor*/,
armnn::Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const TensorInfo& /*mean*/,
+ const TensorInfo& /*var*/,
+ const TensorInfo& /*beta*/,
+ const TensorInfo& /*gamma*/,
+ const BatchNormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& input,
- const TensorInfo& output,
- const BatchToSpaceNdDescriptor& descriptor,
+bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const BatchToSpaceNdDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsComparisonSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- const ComparisonDescriptor& descriptor,
+bool LayerSupportBase::IsComparisonSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
+ const ComparisonDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> /*inputs*/,
+ const TensorInfo& /*output*/,
+ const OriginsDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
+bool LayerSupportBase::IsConstantSupported(const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const Convolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsDebugSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthToSpaceDescriptor& descriptor,
+bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const DepthToSpaceDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const DepthwiseConvolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
- const TensorInfo& scores,
- const TensorInfo& anchors,
- const TensorInfo& detectionBoxes,
- const TensorInfo& detectionClasses,
- const TensorInfo& detectionScores,
- const TensorInfo& numDetections,
- const DetectionPostProcessDescriptor& descriptor,
+bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& /*boxEncodings*/,
+ const TensorInfo& /*scores*/,
+ const TensorInfo& /*anchors*/,
+ const TensorInfo& /*detectionBoxes*/,
+ const TensorInfo& /*detectionClasses*/,
+ const TensorInfo& /*detectionScores*/,
+ const TensorInfo& /*numDetections*/,
+ const DetectionPostProcessDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const DepthwiseConvolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDivisionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& input0,
- const armnn::TensorInfo& input1,
- const armnn::TensorInfo& output,
+bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/,
+ const armnn::TensorInfo& /*input1*/,
+ const armnn::TensorInfo& /*output*/,
armnn::Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
+bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& /*input*/,
+ const FakeQuantizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFloorSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsFloorSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
+bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const TensorInfo& /*weights*/,
+ const TensorInfo& /*biases*/,
+ const FullyConnectedDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& input0,
- const armnn::TensorInfo& input1,
- const armnn::TensorInfo& output,
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& /*input0*/,
+ const armnn::TensorInfo& /*input1*/,
+ const armnn::TensorInfo& /*output*/,
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsGreaterSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
+bool LayerSupportBase::IsInputSupported(const TensorInfo& /*input*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const InstanceNormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const InstanceNormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const L2NormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& input,
- const TensorInfo& output,
- const LogSoftmaxDescriptor& descriptor,
+bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const LogSoftmaxDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsLstmSupported(const TensorInfo& input,
- const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn,
- const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const LstmDescriptor& descriptor,
- const LstmInputParamsInfo& paramsInfo,
+bool LayerSupportBase::IsLstmSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*outputStateIn*/,
+ const TensorInfo& /*cellStateIn*/,
+ const TensorInfo& /*scratchBuffer*/,
+ const TensorInfo& /*outputStateOut*/,
+ const TensorInfo& /*cellStateOut*/,
+ const TensorInfo& /*output*/,
+ const LstmDescriptor& /*descriptor*/,
+ const LstmInputParamsInfo& /*paramsInfo*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMaximumSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMaximumSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMeanSupported(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
+bool LayerSupportBase::IsMeanSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const MeanDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& input,
- const armnn::TensorInfo& output,
- armnn::Optional<std::string &> reasonIfUnsupported) const
+bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& /*input*/,
+ const armnn::TensorInfo& /*output*/,
+ armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
{
- boost::ignore_unused(input);
- boost::ignore_unused(output);
return true;
}
-bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& input,
- const armnn::TensorInfo& output,
- armnn::Optional<std::string &> reasonIfUnsupported) const
+bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& /*input*/,
+ const armnn::TensorInfo& /*output*/,
+ armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
{
- boost::ignore_unused(input);
- boost::ignore_unused(output);
return true;
}
-bool LayerSupportBase::IsMergeSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMergeSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
@@ -336,180 +333,180 @@ bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> in
return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMinimumSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const NormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsOutputSupported(const TensorInfo& output,
+bool LayerSupportBase::IsOutputSupported(const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPadSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
+bool LayerSupportBase::IsPadSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const PadDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPermuteSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
+bool LayerSupportBase::IsPermuteSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const PermuteDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
+bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const Pooling2dDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input,
- const PreCompiledDescriptor& descriptor,
+bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& /*input*/,
+ const PreCompiledDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPreluSupported(const TensorInfo& input,
- const TensorInfo& alpha,
- const TensorInfo& output,
+bool LayerSupportBase::IsPreluSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*alpha*/,
+ const TensorInfo& /*output*/,
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
- const armnn::TensorInfo& output,
+bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& /*input*/,
+ const armnn::TensorInfo& /*output*/,
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& input,
- const TensorInfo& previousCellStateIn,
- const TensorInfo& previousOutputIn,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const QuantizedLstmInputParamsInfo& paramsInfo,
+bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*previousCellStateIn*/,
+ const TensorInfo& /*previousOutputIn*/,
+ const TensorInfo& /*cellStateOut*/,
+ const TensorInfo& /*output*/,
+ const QuantizedLstmInputParamsInfo& /*paramsInfo*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
- const ReshapeDescriptor& descriptor,
+bool LayerSupportBase::IsReshapeSupported(const TensorInfo& /*input*/,
+ const ReshapeDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeSupported(const TensorInfo& input,
- const TensorInfo& output,
- const ResizeDescriptor& descriptor,
+bool LayerSupportBase::IsResizeSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const ResizeDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input,
- const TensorInfo &output,
+bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &/*input*/,
+ const TensorInfo &/*output*/,
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSliceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SliceDescriptor& descriptor,
+bool LayerSupportBase::IsSliceSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SliceDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
+bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SoftmaxDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-
-bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToBatchNdDescriptor& descriptor,
+/**/
+bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SpaceToBatchNdDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToDepthDescriptor& descriptor,
+bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SpaceToDepthDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
+ const ViewsDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
- const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
- const ViewsDescriptor& descriptor,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
+ const std::vector<std::reference_wrapper<TensorInfo>>& /*outputs*/,
+ const ViewsDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
- const TensorInfo& output,
- const StackDescriptor& descriptor,
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+ const TensorInfo& /*output*/,
+ const StackDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const StandInDescriptor& descriptor,
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+ const std::vector<const TensorInfo*>& /*outputs*/,
+ const StandInDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
if (reasonIfUnsupported)
@@ -523,36 +520,36 @@ bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>&
return false;
}
-bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const StridedSliceDescriptor& descriptor,
+bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const StridedSliceDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSwitchSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output0,
- const TensorInfo& output1,
+bool LayerSupportBase::IsSwitchSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output0*/,
+ const TensorInfo& /*output1*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TransposeConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const TransposeConvolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index bf997dbff7..08189f9999 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -55,7 +55,7 @@ struct Rule
};
template<typename T>
-bool AllTypesAreEqualImpl(T t)
+bool AllTypesAreEqualImpl(T)
{
return true;
}
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 71358bbbef..9d8174ce7d 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -31,6 +31,9 @@ struct MakeWorkloadForType<NullWorkload>
const WorkloadInfo& info,
Args&&... args)
{
+ boost::ignore_unused(descriptor);
+ boost::ignore_unused(info);
+ boost::ignore_unused(args...);
return nullptr;
}
};
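In the NullWorkload specialisation above the parameters keep their names, and the change consumes them with boost::ignore_unused rather than commenting the names out. A standalone sketch of that idiom, assuming Boost is available; MakeStub, Descriptor and Info below are hypothetical names, not Arm NN types.

// Standalone sketch of the boost::ignore_unused idiom from the hunk above;
// MakeStub/Descriptor/Info are hypothetical and only mirror its shape.

#include <boost/core/ignore_unused.hpp>
#include <memory>

struct Descriptor {};
struct Info {};

template <typename... Args>
std::unique_ptr<int> MakeStub(const Descriptor& descriptor, const Info& info, Args&&... args)
{
    // The parameters stay named (other specialisations do use them), so they are
    // consumed explicitly instead of being commented out of the signature.
    boost::ignore_unused(descriptor);
    boost::ignore_unused(info);
    boost::ignore_unused(args...);
    return nullptr; // the stub never produces a workload
}

int main()
{
    return MakeStub(Descriptor{}, Info{}, 1, 2.0) ? 1 : 0;
}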
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index b5851ad7b9..e03068618b 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -27,7 +27,7 @@ public:
virtual profiling::ProfilingGuid GetGuid() const = 0;
- virtual void RegisterDebugCallback(const DebugCallbackFunction& func) {}
+ virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {}
};
// NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index d9a1f46c9f..c3dd601fbd 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2554,7 +2554,7 @@ void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
"output_1");
}
-void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
{
// This is internally generated so it should not need validation.
}
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 805ec7ba5f..a4327e441a 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1074,358 +1074,358 @@ bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLaye
}
// Default Implementations
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
- const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
- const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
- const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
- const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
- const InstanceNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+ const InstanceNormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo&/**/ /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor,
- const WorkloadInfo &info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
+ const WorkloadInfo &/*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+/**/
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
- const TransposeConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+ const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 6f7437413a..1947c6935b 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -16,240 +16,242 @@ public:
bool SupportsSubTensors() const override
{ return false; };
- std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, TensorShape const& subTensorShape,
- unsigned int const *subTensorOrigin) const override
+ std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& /*parent*/,
+ TensorShape const& /*subTensorShape*/,
+ unsigned int const */*subTensorOrigin*/) const override
{ return nullptr; };
- std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
- const bool IsMemoryManaged = true) const override
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& /*tensorInfo*/,
+ const bool /*IsMemoryManaged*/) const override
{ return nullptr; }
- std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, DataLayout dataLayout,
- const bool IsMemoryManaged = true) const override
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& /*tensorInfo*/,
+ DataLayout /*dataLayout*/,
+ const bool /*IsMemoryManaged*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
};
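The stub factory above shows the main pattern this change applies across backendsCommon: when an override must keep its full signature but does not use an argument, the parameter name is moved into a comment, so -Wextra's -Wunused-parameter warning goes away while the header still documents each argument. Below is a minimal, self-contained sketch of that pattern; the IExampleFactory and NullExampleFactory names are hypothetical and not part of Arm NN.

#include <memory>

// Hypothetical interface with the same shape as the workload-factory methods above.
class IExampleFactory
{
public:
    virtual ~IExampleFactory() = default;
    virtual std::unique_ptr<int> CreateSomething(const int& descriptor, bool isManaged) const = 0;
};

// Stub implementation: parameter names are kept only as comments, so the compiler sees
// unnamed (and therefore deliberately unused) parameters and emits no warning under -Wextra.
class NullExampleFactory : public IExampleFactory
{
public:
    std::unique_ptr<int> CreateSomething(const int& /*descriptor*/, bool /*isManaged*/) const override
    {
        return nullptr;
    }
};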
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 7ab5ee4ec4..6924beb820 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -414,6 +414,7 @@ struct LayerTypePolicy<armnn::LayerType::name, DataType> \
static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
unsigned int nIn, unsigned int nOut) \
{ \
+ boost::ignore_unused(factory, nIn, nOut); \
return std::unique_ptr<armnn::IWorkload>(); \
} \
};
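Where keeping the parameter names visible is preferable, as in the macro-generated dummy-workload stub above, the change uses the second standard technique instead: boost::ignore_unused from <boost/core/ignore_unused.hpp>, a variadic no-op that simply marks its arguments as used. A minimal sketch with hypothetical names follows.

#include <boost/core/ignore_unused.hpp>

// Hypothetical stand-in for a macro-generated stub: the parameters are required by the
// signature but are not used by this particular instantiation.
int MakeDummyThing(int* factory, unsigned int nIn, unsigned int nOut)
{
    boost::ignore_unused(factory, nIn, nOut); // no-op; silences -Wunused-parameter
    return 0;
}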
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index c5a4ed9e52..367d9cb8a5 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -88,7 +88,7 @@ const BackendId& MockBackend::GetIdStatic()
}
IBackendInternal::IWorkloadFactoryPtr MockBackend::CreateWorkloadFactory(
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
+ const IBackendInternal::IMemoryManagerSharedPtr& /*memoryManager*/) const
{
return IWorkloadFactoryPtr{};
}
diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp
index 437f23d2b3..771e499280 100644
--- a/src/backends/backendsCommon/test/MockBackend.hpp
+++ b/src/backends/backendsCommon/test/MockBackend.hpp
@@ -37,32 +37,32 @@ public:
class MockLayerSupport : public LayerSupportBase {
public:
- bool IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsInputSupported(const TensorInfo& /*input*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
- bool IsOutputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsOutputSupported(const TensorInfo& /*input*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
- bool IsAdditionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsAdditionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
- bool IsConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsConvolution2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const Convolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
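One detail worth noting about the MockLayerSupport overrides above: the default argument is commented out together with the parameter name. Default arguments are not inherited by overriding functions in any case; they are resolved from the static type used at the call site, so callers going through the ILayerSupport/LayerSupportBase interface still get EmptyOptional(), while a call made directly on a MockLayerSupport object must now pass the optional argument explicitly. The small sketch below illustrates that behaviour with hypothetical Base and Mock classes, not Arm NN code.

#include <string>

struct Base
{
    virtual ~Base() = default;
    virtual bool IsInputSupported(int /*input*/, std::string /*reason*/ = "none") const { return false; }
};

struct Mock : Base
{
    // Default argument intentionally dropped from the override, as in MockLayerSupport.
    bool IsInputSupported(int /*input*/, std::string /*reason*/) const override { return true; }
};

// Base& b = mock;  b.IsInputSupported(0);        // OK: default "none" taken from Base
// Mock m;          m.IsInputSupported(0);        // error: no default on Mock's override
// Mock m;          m.IsInputSupported(0, "");    // OK: argument supplied explicitly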
diff --git a/src/backends/backendsCommon/test/TestDynamicBackend.cpp b/src/backends/backendsCommon/test/TestDynamicBackend.cpp
index a53f1698c2..cbfe09377e 100644
--- a/src/backends/backendsCommon/test/TestDynamicBackend.cpp
+++ b/src/backends/backendsCommon/test/TestDynamicBackend.cpp
@@ -7,6 +7,8 @@
#include <armnn/backends/IBackendInternal.hpp>
+#include <boost/core/ignore_unused.hpp>
+
constexpr const char* TestDynamicBackendId()
{
#if defined(VALID_TEST_DYNAMIC_BACKEND_1)
@@ -63,6 +65,7 @@ public:
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager) const override
{
+ boost::ignore_unused(memoryManager);
return IWorkloadFactoryPtr{};
}
ILayerSupportSharedPtr GetLayerSupport() const override