Diffstat (limited to 'src/backends/cl')
-rw-r--r--   src/backends/cl/ClLayerSupport.cpp      11
-rw-r--r--   src/backends/cl/ClWorkloadFactory.cpp    3
-rw-r--r--   src/backends/cl/OpenClTimer.cpp          3
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 6e1e9d98c6..bce91ab462 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -88,8 +88,10 @@ bool IsMatchingStride(uint32_t actualStride)
return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
-bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
+template<typename ... Args>
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
+ boost::ignore_unused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
return true;
#else
@@ -124,7 +126,7 @@ inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIf
return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
- return IsClBackendSupported(reasonIfUnsupported);
+ return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
template<typename FloatFunc, typename Uint8Func, typename ... Params>
@@ -461,7 +463,7 @@ bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsClBackendSupported(reasonIfUnsupported);
+ return IsClBackendSupported(reasonIfUnsupported, input);
}
bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
@@ -579,7 +581,7 @@ bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsClBackendSupported(reasonIfUnsupported);
+ return IsClBackendSupported(reasonIfUnsupported, output);
}
bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -758,6 +760,7 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
*splitAxis.begin());
}
#endif
+ boost::ignore_unused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
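
The common thread in the ClLayerSupport.cpp hunks above: when ARMCOMPUTECL_ENABLED is not defined, FORWARD_WORKLOAD_VALIDATE_FUNC now forwards its arguments to the new variadic IsClBackendSupported overload, which discards them via boost::ignore_unused so the CL-less stub build stays clean under -Wunused-parameter/-Werror. Below is a minimal sketch of that pattern; it swaps armnn::Optional<std::string&> for a plain std::string&, uses a placeholder message, and renames the symbols (*Sketch) to make clear it is an illustration rather than the exact ArmNN code.

    #include <boost/core/ignore_unused.hpp>
    #include <string>

    // Variadic fallback: accepts whatever the validation macro forwards and
    // discards it, so unused-parameter warnings cannot fire in the stub build.
    template <typename... Args>
    bool IsClBackendSupportedSketch(std::string& reasonIfUnsupported, Args... args)
    {
        boost::ignore_unused(reasonIfUnsupported, args...);
    #if defined(ARMCOMPUTECL_ENABLED)
        return true;
    #else
        reasonIfUnsupported = "CL backend not built"; // placeholder message
        return false;
    #endif
    }

    // The validation macro can then forward the layer's tensors and descriptor
    // unconditionally, whichever branch of the #if is compiled:
    #define FORWARD_WORKLOAD_VALIDATE_FUNC_SKETCH(func, reasonIfUnsupported, ...) \
        return IsClBackendSupportedSketch(reasonIfUnsupported, __VA_ARGS__);
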
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 531f3710ea..4746167795 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -23,6 +23,7 @@
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
+#include <boost/core/ignore_unused.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/format.hpp>
@@ -84,6 +85,7 @@ ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& mem
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
+ boost::ignore_unused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
@@ -94,6 +96,7 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const Tenso
DataLayout dataLayout,
const bool IsMemoryManaged) const
{
+ boost::ignore_unused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
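
Both ClWorkloadFactory.cpp hunks apply the same idiom to a single parameter: IsMemoryManaged is part of the factory interface but is not acted on in this code path, so it is handed to boost::ignore_unused rather than left to trigger a warning. A self-contained sketch of the idiom follows; DummyHandle and CreateTensorHandleSketch are hypothetical stand-ins for the real ClTensorHandle machinery.

    #include <boost/core/ignore_unused.hpp>
    #include <cstddef>
    #include <memory>
    #include <vector>

    // Hypothetical handle type, for illustration only.
    struct DummyHandle { std::vector<char> storage; };

    std::unique_ptr<DummyHandle> CreateTensorHandleSketch(std::size_t sizeInBytes,
                                                          const bool isMemoryManaged)
    {
        // The flag is required by the interface but not used here yet;
        // boost::ignore_unused keeps -Wunused-parameter (and -Werror) builds quiet.
        boost::ignore_unused(isMemoryManaged);
        auto handle = std::make_unique<DummyHandle>();
        handle->storage.resize(sizeInBytes);
        return handle;
    }
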
diff --git a/src/backends/cl/OpenClTimer.cpp b/src/backends/cl/OpenClTimer.cpp
index 57552d7bd9..ee3c114ba0 100644
--- a/src/backends/cl/OpenClTimer.cpp
+++ b/src/backends/cl/OpenClTimer.cpp
@@ -8,6 +8,8 @@
#include <string>
#include <sstream>
+#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
@@ -29,6 +31,7 @@ void OpenClTimer::Start()
const cl_event * event_wait_list,
cl_event * event)
{
+ boost::ignore_unused(event);
cl_int retVal = 0;
// Get the name of the kernel
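
OpenClTimer's enqueue interceptor has to keep the trailing cl_event* parameter to match the signature it wraps, even though it never reads it. The sketch below rests on the assumption that boost::ignore_unused is, in effect, an empty inlineable variadic function, which is why the call carries no runtime cost; the cl_event* is replaced by a plain void* so the example needs no OpenCL headers, and both names (*Sketch) are hypothetical.

    // Roughly what boost::ignore_unused boils down to: an empty function whose
    // only job is to "use" its arguments so -Wunused-parameter stays quiet.
    template <typename... Ts>
    constexpr void IgnoreUnusedSketch(Ts const&...) noexcept {}

    // Hypothetical wrapper mirroring the OpenClTimer change: the last parameter
    // is required by the intercepted callback signature but never inspected.
    int EnqueueSketch(int kernelId, void* event /* stand-in for cl_event* */)
    {
        IgnoreUnusedSketch(event);
        return kernelId; // pretend to enqueue and report success via the id
    }
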