about summary refs log tree commit diff
path: root/src/backends/reference
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                 | 18
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp              | 12
-rw-r--r--  src/backends/reference/test/RefWorkloadFactoryHelper.hpp   |  1
-rw-r--r--  src/backends/reference/workloads/ArgMinMax.cpp             |  2
-rw-r--r--  src/backends/reference/workloads/Dequantize.cpp            |  2
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp  |  2
-rw-r--r--  src/backends/reference/workloads/Gather.cpp                |  2
-rw-r--r--  src/backends/reference/workloads/Pooling2d.cpp             |  2
8 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 19b76152f3..ebcd1f633e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -648,6 +648,8 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
+
bool supported = true;
std::array<DataType,3> supportedInputTypes =
@@ -863,8 +865,8 @@ bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
reasonIfUnsupported);
}
-bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
+bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
+ Optional<std::string&> /*reasonIfUnsupported*/) const
{
return true;
}
@@ -1301,8 +1303,8 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
return supported;
}
-bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
+bool RefLayerSupport::IsOutputSupported(const TensorInfo& /*output*/,
+ Optional<std::string&> /*reasonIfUnsupported*/) const
{
return true;
}
@@ -1470,6 +1472,7 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
@@ -1524,7 +1527,7 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType, 3> supportedTypes =
@@ -1551,7 +1554,7 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
@@ -1578,7 +1581,7 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
@@ -1811,6 +1814,7 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 8d044eecb7..dffb13db2d 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -77,19 +77,21 @@ bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
}
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
- const bool IsMemoryManaged) const
+ const bool isMemoryManaged) const
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
+ boost::ignore_unused(isMemoryManaged);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout,
- const bool IsMemoryManaged) const
+ const bool isMemoryManaged) const
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
+ boost::ignore_unused(isMemoryManaged, dataLayout);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
@@ -218,6 +220,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueu
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
+ boost::ignore_unused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
@@ -253,6 +256,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDes
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
+ boost::ignore_unused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
@@ -410,8 +414,8 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQu
return std::make_unique<RefPooling2dWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return nullptr;
}
diff --git a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
index b49a6dd3d5..10e5b9fa28 100644
--- a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
+++ b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
@@ -25,6 +25,7 @@ struct WorkloadFactoryHelper<armnn::RefWorkloadFactory>
static armnn::RefWorkloadFactory GetFactory(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
{
+ boost::ignore_unused(memoryManager);
return armnn::RefWorkloadFactory();
}
};
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index 76616f1465..db85b958e9 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -15,6 +15,8 @@ namespace armnn
void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
{
+ boost::ignore_unused(outputTensorInfo);
+
unsigned int uAxis = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
const unsigned int outerElements = armnnUtils::GetNumElementsBetween(inputTensorInfo.GetShape(), 0, uAxis);
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index fafc03e69b..4025e8d7fa 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -5,6 +5,7 @@
#include "Dequantize.hpp"
+#include <boost/core/ignore_unused.hpp>
namespace armnn
{
@@ -13,6 +14,7 @@ void Dequantize(Decoder<float>& inputDecoder,
const TensorInfo& inputInfo,
const TensorInfo& outputInfo)
{
+ boost::ignore_unused(outputInfo);
BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index d475dd8ac0..3b384f1480 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -155,6 +155,8 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
float* detectionScores,
float* numDetections)
{
+ boost::ignore_unused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
+
// Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
// which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
std::vector<float> boxCorners(boxEncodingsInfo.GetNumElements());
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index c848a7c138..5416855f48 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -9,6 +9,7 @@
#include <backendsCommon/WorkloadData.hpp>
+#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -21,6 +22,7 @@ void Gather(const TensorInfo& paramsInfo,
const int32_t* indices,
Encoder<float>& output)
{
+ boost::ignore_unused(outputInfo);
const TensorShape& paramsShape = paramsInfo.GetShape();
unsigned int paramsProduct = 1;
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index ea8f4ee7e7..8ff2eb457a 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -84,7 +84,7 @@ namespace
{
case PoolingAlgorithm::Max:
{
- return [](float & accumulated, float kernelSize) {};
+ return [](float & /*accumulated*/, float /*kernelSize*/) {};
}
case PoolingAlgorithm::Average: