about summary refs log tree commit diff
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2018-10-19 16:46:15 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-22 16:57:54 +0100
commita8e06ed540a934f966679e1ef1cf7acf295211b3 (patch)
tree247035d1bce6ddb8543081a88294a85e34968035
parentf0b4845c1c6f24f59d4c88473b852cf69a3c7ae9 (diff)
downloadarmnn-a8e06ed540a934f966679e1ef1cf7acf295211b3.tar.gz
IVGCVSW-1955: Unify backend exceptions (wrap cl::Error)
* Added wrapper function around arm_compute::IFunction::run() that catches cl::Error and wraps it into an armnn::RuntimeException
* Added MakeWorkload template inside ClWorkloadFactory that catches cl::Error and wraps it into an armnn::RuntimeException
* Replaced cl::Error with armnn::RuntimeException in catch statements inside LoadedNetwork

Change-Id: I2340f41ae02b8db1d7ef5157824a50e7410854e3
-rw-r--r--include/armnn/Exceptions.hpp5
-rw-r--r--src/armnn/LoadedNetwork.cpp56
-rw-r--r--src/backends/MakeWorkloadHelper.hpp10
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp69
-rw-r--r--src/backends/cl/ClWorkloadFactory.hpp10
-rw-r--r--src/backends/cl/workloads/ClActivationWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClAdditionWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp2
-rw-r--r--src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp2
-rw-r--r--src/backends/cl/workloads/ClConvolution2dWorkload.cpp3
-rw-r--r--src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClDivisionFloatWorkload.cpp4
-rw-r--r--src/backends/cl/workloads/ClFloorFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClFullyConnectedWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClLstmFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClMultiplicationWorkload.cpp4
-rw-r--r--src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClPadWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClPermuteWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClPooling2dWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClReshapeWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp3
-rw-r--r--src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp3
-rw-r--r--src/backends/cl/workloads/ClSubtractionWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClWorkloadUtils.hpp24
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp40
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp4
30 files changed, 162 insertions, 107 deletions
diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp
index 29d874cd05..008617d457 100644
--- a/include/armnn/Exceptions.hpp
+++ b/include/armnn/Exceptions.hpp
@@ -110,6 +110,11 @@ class BadOptionalAccessException : public Exception
using Exception::Exception;
};
+class RuntimeException : public Exception
+{
+ using Exception::Exception;
+};
+
template <typename ExceptionType>
void ConditionalThrow(bool condition, const std::string& message)
{
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 4f73bda832..f49fa7b878 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -11,10 +11,6 @@
#include "Profiling.hpp"
#include "HeapProfiling.hpp"
-#ifdef ARMCOMPUTECL_ENABLED
-#include <arm_compute/core/CL/OpenCL.h>
-#endif
-
#include <backends/CpuTensorHandle.hpp>
#include <boost/polymorphic_cast.hpp>
@@ -38,15 +34,6 @@ std::string ToErrorMessage(const char * prefix, const ExceptionType & error)
return ss.str();
}
-#if ARMCOMPUTECL_ENABLED
-std::string ToErrorMessage(const char * prefix, const cl::Error& error)
-{
- std::stringstream ss;
- ss << prefix << " " << error.what() << ". CL error code is: " << error.err();
- return ss.str();
-}
-#endif
-
} // anonymous
std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
@@ -54,30 +41,30 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
{
std::unique_ptr<LoadedNetwork> loadedNetwork;
+ auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
+ {
+ errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
+ BOOST_LOG_TRIVIAL(error) << errorMessage;
+
+ return std::unique_ptr<LoadedNetwork>();
+ };
+
try
{
loadedNetwork.reset(new LoadedNetwork(std::move(net)));
}
- catch (const std::runtime_error& error)
+ catch (const armnn::RuntimeException& error)
{
- errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
- BOOST_LOG_TRIVIAL(error) << errorMessage;
- return std::unique_ptr<LoadedNetwork>();
+ return Fail(error);
}
catch (const armnn::Exception& error)
{
- errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
- BOOST_LOG_TRIVIAL(error) << errorMessage;
- return std::unique_ptr<LoadedNetwork>();
+ return Fail(error);
}
-#if ARMCOMPUTECL_ENABLED
- catch (const cl::Error& error)
+ catch (const std::runtime_error& error)
{
- errorMessage = ToErrorMessage("A CL error occurred attempting to prepare a network workload: ", error);
- BOOST_LOG_TRIVIAL(error) << errorMessage;
- return std::unique_ptr<LoadedNetwork>();
+ return Fail(error);
}
-#endif
return loadedNetwork;
}
@@ -420,6 +407,12 @@ bool LoadedNetwork::Execute()
m_CpuAcc.Acquire();
m_GpuAcc.Acquire();
+ auto Fail = [&](const std::exception& error)
+ {
+ BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what();
+ success = false;
+ };
+
try
{
for (size_t i = 0; i < m_WorkloadQueue.size(); ++i)
@@ -427,18 +420,13 @@ bool LoadedNetwork::Execute()
m_WorkloadQueue[i]->Execute();
}
}
-#if ARMCOMPUTECL_ENABLED
- catch (const cl::Error& error)
+ catch (const RuntimeException& error)
{
- BOOST_LOG_TRIVIAL(error) << "A CL error occurred attempting to execute a workload: "
- << error.what() << ". CL error code is: " << error.err();
- success = false;
+ Fail(error);
}
-#endif
catch (const std::runtime_error& error)
{
- BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what();
- success = false;
+ Fail(error);
}
// Informs the memory managers to release memory in it's respective memory group
diff --git a/src/backends/MakeWorkloadHelper.hpp b/src/backends/MakeWorkloadHelper.hpp
index 281a65a21e..78a9669530 100644
--- a/src/backends/MakeWorkloadHelper.hpp
+++ b/src/backends/MakeWorkloadHelper.hpp
@@ -39,7 +39,9 @@ struct MakeWorkloadForType<NullWorkload>
// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
template <typename Float16Workload, typename Float32Workload, typename Uint8Workload, typename QueueDescriptorType,
typename... Args>
-std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args)
+std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info,
+ Args&&... args)
{
const DataType dataType = !info.m_InputTensorInfos.empty() ?
info.m_InputTensorInfos[0].GetDataType()
@@ -67,9 +69,11 @@ std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, c
// FloatWorkload, Uint8Workload>.
// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
-std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args)
+std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info,
+ Args&&... args)
{
- return MakeWorkload<FloatWorkload, FloatWorkload, Uint8Workload>(descriptor, info,
+ return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload>(descriptor, info,
std::forward<Args>(args)...);
}
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 68d371388c..e1d8314d82 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -16,12 +16,13 @@
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <backends/cl/workloads/ClWorkloads.hpp>
-
#include <backends/MemCopyWorkload.hpp>
-#include <backends/cl/ClTensorHandle.hpp>
#include <backends/aclCommon/memory/IPoolManager.hpp>
+
+#include <backends/cl/ClTensorHandle.hpp>
+#include <backends/cl/workloads/ClWorkloads.hpp>
+#include <backends/cl/workloads/ClWorkloadUtils.hpp>
#endif
#include <backends/MakeWorkloadHelper.hpp>
@@ -42,6 +43,36 @@ bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
#ifdef ARMCOMPUTECL_ENABLED
+template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
+std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info,
+ Args&&... args)
+{
+ try
+ {
+ return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
+ }
+ catch (const cl::Error& clError)
+ {
+ throw WrapClError(clError, CHECK_LOCATION());
+ }
+}
+
+template <typename Workload, typename QueueDescriptorType, typename... Args>
+std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info,
+ Args&&... args)
+{
+ try
+ {
+ return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
+ }
+ catch (const cl::Error& clError)
+ {
+ throw WrapClError(clError, CHECK_LOCATION());
+ }
+}
+
ClWorkloadFactory::ClWorkloadFactory()
: m_MemoryManager(std::make_unique<arm_compute::CLBufferAllocator>())
{
@@ -100,26 +131,26 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDesc
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClActivationWorkload>(descriptor, info);
+ return MakeWorkload<ClActivationWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
return MakeWorkload<ClSoftmaxFloatWorkload, ClSoftmaxUint8Workload>(descriptor, info,
- m_MemoryManager.GetIntraLayerManager());
+ m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClSplitterWorkload>(descriptor, info);
+ return MakeWorkload<ClSplitterWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClMergerWorkload>(descriptor, info);
+ return MakeWorkload<ClMergerWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateFullyConnected(
@@ -132,25 +163,25 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateFullyConnected(
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClPermuteWorkload>(descriptor, info);
+ return MakeWorkload<ClPermuteWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClPooling2dWorkload>(descriptor, info);
+ return MakeWorkload<ClPooling2dWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClConvolution2dWorkload>(descriptor, info, m_MemoryManager.GetIntraLayerManager());
+ return MakeWorkload<ClConvolution2dWorkload>(descriptor, info, m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return std::make_unique<ClDepthwiseConvolutionWorkload>(descriptor, info);
+ return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
@@ -162,13 +193,13 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const N
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClAdditionWorkload>(descriptor, info);
+ return MakeWorkload<ClAdditionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMultiplication(
const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return std::make_unique<ClMultiplicationWorkload>(descriptor, info);
+ return MakeWorkload<ClMultiplicationWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateDivision(
@@ -180,7 +211,7 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateDivision(
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClSubtractionWorkload>(descriptor, info);
+ return MakeWorkload<ClSubtractionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization(
@@ -223,13 +254,13 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2Norm
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClConstantWorkload>(descriptor, info);
+ return MakeWorkload<ClConstantWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClReshapeWorkload>(descriptor, info);
+ return MakeWorkload<ClReshapeWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
@@ -248,14 +279,14 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp16ToFp32(
const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClConvertFp16ToFp32Workload>(descriptor, info);
+ return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClConvertFp32ToFp16Workload>(descriptor, info);
+ return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
@@ -267,7 +298,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescript
std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<ClPadWorkload>(descriptor, info);
+ return MakeWorkload<ClPadWorkload>(descriptor, info);
}
void ClWorkloadFactory::Finalize()
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 9f8ec62db7..66de3a50f1 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -132,6 +132,16 @@ public:
private:
#ifdef ARMCOMPUTECL_ENABLED
+ template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
+ static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info,
+ Args&&... args);
+
+ template <typename Workload, typename QueueDescriptorType, typename... Args>
+ static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info,
+ Args&&... args);
+
mutable ClMemoryManager m_MemoryManager;
#endif
};
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 426af9f16d..188ad3283e 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -53,7 +53,7 @@ ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& desc
void ClActivationWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationWorkload_Execute");
- m_ActivationLayer.run();
+ RunClFunction(m_ActivationLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index c9ac958402..6ec207a956 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -32,7 +32,7 @@ ClAdditionWorkload::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor
void ClAdditionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 24be7cddca..1f3f9b540a 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -94,7 +94,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
void ClBatchNormalizationFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 2c9a0e1fc2..b489ced066 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -29,7 +29,7 @@ ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
void ClConvertFp16ToFp32Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp16ToFp32Workload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 6758180a6e..781607f716 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -29,7 +29,7 @@ ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
void ClConvertFp32ToFp16Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp32ToFp16Workload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 301859ee1b..7c876ab7bb 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -106,8 +106,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
void ClConvolution2dWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
-
- m_ConvolutionLayer.run();
+ RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
}
void ClConvolution2dWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 6fa9ddc6b0..6b159f15e4 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -137,7 +137,7 @@ void ClDepthwiseConvolutionWorkload::Execute() const
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
BOOST_ASSERT(m_DepthwiseConvolutionLayer);
- m_DepthwiseConvolutionLayer->run();
+ RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
index a2d8534682..324d8bda8a 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
@@ -40,9 +40,7 @@ ClDivisionFloatWorkload::ClDivisionFloatWorkload(const DivisionQueueDescriptor&
void ClDivisionFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
-
- // Executes the layer.
- m_ArithmeticDivision.run();
+ RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0a60fc3b5c..457d19eafe 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -25,7 +25,7 @@ ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descripto
void ClFloorFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index b3a97f35f8..7b2ecf0e8d 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -84,7 +84,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(const FullyConnectedQueueDesc
void ClFullyConnectedWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
- m_FullyConnectedLayer.run();
+ RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
}
void ClFullyConnectedWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f84801601a..0dd0603b54 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -48,7 +48,7 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
void ClL2NormalizationFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index aa7110cad3..177368bdbe 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -217,7 +217,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor,
void ClLstmFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClLstmFloatWorkload_Execute");
- m_LstmLayer.run();
+ RunClFunction(m_LstmLayer, CHECK_LOCATION());
}
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 9d23caa695..c0bcdbc4c2 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -52,9 +52,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
void ClMultiplicationWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationWorkload_Execute");
-
- // Executes the layer.
- m_PixelWiseMultiplication.run();
+ RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index f6c07e1c7a..f3cc6ec08d 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -49,7 +49,7 @@ ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQu
void ClNormalizationFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
- m_NormalizationLayer.run();
+ RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 3e63d5c210..44c0eeab20 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -37,7 +37,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor, const Workloa
void ClPadWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClPadWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClPadValidate(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index 5dacc83749..39fa56f195 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -45,7 +45,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
void ClPermuteWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL( GetName() + "_Execute");
- m_PermuteFunction.run();
+ RunClFunction(m_PermuteFunction, CHECK_LOCATION());
}
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 68512ff980..b54afd2fa4 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -51,7 +51,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
void ClPooling2dWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dWorkload_Execute");
- m_PoolingLayer.run();
+ RunClFunction(m_PoolingLayer, CHECK_LOCATION());
}
}
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 43a53cb7a1..47cea944d9 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -26,7 +26,7 @@ ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor, c
void ClReshapeWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 4ee6d5e7a5..c4f0a041df 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -38,8 +38,7 @@ ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinea
void ClResizeBilinearFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
- m_ResizeBilinearLayer.run();
+ RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
}
-
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index 606005659f..ed012cc30b 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -27,7 +27,7 @@ ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& des
void ClSoftmaxFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
- m_SoftmaxLayer.run();
+ RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 7e0589e89f..d06306e178 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -36,8 +36,7 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
void ClSoftmaxUint8Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
-
- m_SoftmaxLayer.run();
+ RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 1967fae354..e23dab0f57 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -32,7 +32,7 @@ ClSubtractionWorkload::ClSubtractionWorkload(const SubtractionQueueDescriptor& d
void ClSubtractionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index c765c63dce..ca0de8dd0a 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -10,6 +10,10 @@
#include <backends/cl/OpenClTimer.hpp>
#include <backends/CpuTensorHandle.hpp>
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+#include <sstream>
+
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
name, \
@@ -60,4 +64,24 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
}
};
+inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
+{
+ std::stringstream message;
+ message << "CL error: " << clError.what() << ". Error code: " << clError.err();
+
+ return RuntimeException(message.str(), location);
+}
+
+inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
+{
+ try
+ {
+ function.run();
+ }
+ catch (cl::Error& error)
+ {
+ throw WrapClError(error, location);
+ }
+}
+
} //namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index b3e1dd9563..0e069a2f64 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -79,13 +79,13 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const Ten
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
+ return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
+ return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -97,8 +97,8 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const Activatio
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(descriptor, info,
- m_MemoryManager.GetIntraLayerManager());
+ return MakeWorkloadHelper<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(descriptor, info,
+ m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
@@ -116,8 +116,8 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMerger(const Merger
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NeonFullyConnectedWorkload, NeonFullyConnectedWorkload>(descriptor, info,
- m_MemoryManager.GetIntraLayerManager());
+ return MakeWorkloadHelper<NeonFullyConnectedWorkload, NeonFullyConnectedWorkload>(descriptor, info,
+ m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
@@ -148,38 +148,38 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
- m_MemoryManager.GetIntraLayerManager());
+ return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
+ m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonAdditionFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NeonAdditionFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NeonMultiplicationFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NeonMultiplicationFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NeonSubtractionFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NeonSubtractionFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NeonBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NeonBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
@@ -190,7 +190,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCo
throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
}
- return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
+ return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
@@ -210,8 +210,8 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
- m_MemoryManager.GetIntraLayerManager());
+ return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
+ m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
@@ -229,13 +229,13 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueue
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
@@ -255,13 +255,13 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
void NeonWorkloadFactory::Finalize()
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index b1f9d6c70a..048f6cdcc4 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -18,7 +18,7 @@ template <typename F32Workload, typename U8Workload, typename QueueDescriptorTyp
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
const WorkloadInfo& info) const
{
- return armnn::MakeWorkload<NullWorkload, F32Workload, U8Workload>(descriptor, info);
+ return armnn::MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload>(descriptor, info);
}
RefWorkloadFactory::RefWorkloadFactory()
@@ -114,7 +114,7 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateFullyConnected(
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return armnn::MakeWorkload<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteUint8Workload>
+ return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteUint8Workload>
(descriptor, info);
}