Diffstat (limited to 'src/backends/cl/workloads')
-rw-r--r-- src/backends/cl/workloads/ClActivationWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClAdditionWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 3
-rw-r--r-- src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClDivisionFloatWorkload.cpp | 4
-rw-r--r-- src/backends/cl/workloads/ClFloorFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClFullyConnectedWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClLstmFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClMultiplicationWorkload.cpp | 4
-rw-r--r-- src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClPadWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClPermuteWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClPooling2dWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClReshapeWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp | 3
-rw-r--r-- src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp | 3
-rw-r--r-- src/backends/cl/workloads/ClSubtractionWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClWorkloadUtils.hpp | 24
23 files changed, 46 insertions(+), 29 deletions(-)
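
Every hunk below makes the same mechanical change to a workload's Execute() method: the direct call to the arm_compute function,

    m_Layer.run();

is replaced by

    RunClFunction(m_Layer, CHECK_LOCATION());

RunClFunction is a new helper added to ClWorkloadUtils.hpp (the last hunk in this diff). It runs the function and converts any cl::Error thrown by the OpenCL runtime into an armnn::RuntimeException carrying the CL error message, the CL error code, and the source location captured by CHECK_LOCATION() at the call site.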
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 426af9f16d..188ad3283e 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -53,7 +53,7 @@ ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& desc
void ClActivationWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationWorkload_Execute");
- m_ActivationLayer.run();
+ RunClFunction(m_ActivationLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index c9ac958402..6ec207a956 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -32,7 +32,7 @@ ClAdditionWorkload::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor
void ClAdditionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 24be7cddca..1f3f9b540a 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -94,7 +94,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
void ClBatchNormalizationFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 2c9a0e1fc2..b489ced066 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -29,7 +29,7 @@ ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
void ClConvertFp16ToFp32Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp16ToFp32Workload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 6758180a6e..781607f716 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -29,7 +29,7 @@ ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
void ClConvertFp32ToFp16Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp32ToFp16Workload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 301859ee1b..7c876ab7bb 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -106,8 +106,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
void ClConvolution2dWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
-
- m_ConvolutionLayer.run();
+ RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
}
void ClConvolution2dWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 6fa9ddc6b0..6b159f15e4 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -137,7 +137,7 @@ void ClDepthwiseConvolutionWorkload::Execute() const
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
BOOST_ASSERT(m_DepthwiseConvolutionLayer);
- m_DepthwiseConvolutionLayer->run();
+ RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
index a2d8534682..324d8bda8a 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
@@ -40,9 +40,7 @@ ClDivisionFloatWorkload::ClDivisionFloatWorkload(const DivisionQueueDescriptor&
void ClDivisionFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
-
- // Executes the layer.
- m_ArithmeticDivision.run();
+ RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0a60fc3b5c..457d19eafe 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -25,7 +25,7 @@ ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descripto
void ClFloorFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index b3a97f35f8..7b2ecf0e8d 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -84,7 +84,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(const FullyConnectedQueueDesc
void ClFullyConnectedWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
- m_FullyConnectedLayer.run();
+ RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
}
void ClFullyConnectedWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f84801601a..0dd0603b54 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -48,7 +48,7 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
void ClL2NormalizationFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index aa7110cad3..177368bdbe 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -217,7 +217,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor,
void ClLstmFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClLstmFloatWorkload_Execute");
- m_LstmLayer.run();
+ RunClFunction(m_LstmLayer, CHECK_LOCATION());
}
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 9d23caa695..c0bcdbc4c2 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -52,9 +52,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
void ClMultiplicationWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationWorkload_Execute");
-
- // Executes the layer.
- m_PixelWiseMultiplication.run();
+ RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index f6c07e1c7a..f3cc6ec08d 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -49,7 +49,7 @@ ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQu
void ClNormalizationFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
- m_NormalizationLayer.run();
+ RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 3e63d5c210..44c0eeab20 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -37,7 +37,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor, const Workloa
void ClPadWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClPadWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClPadValidate(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index 5dacc83749..39fa56f195 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -45,7 +45,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
void ClPermuteWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL( GetName() + "_Execute");
- m_PermuteFunction.run();
+ RunClFunction(m_PermuteFunction, CHECK_LOCATION());
}
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 68512ff980..b54afd2fa4 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -51,7 +51,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
void ClPooling2dWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dWorkload_Execute");
- m_PoolingLayer.run();
+ RunClFunction(m_PoolingLayer, CHECK_LOCATION());
}
}
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 43a53cb7a1..47cea944d9 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -26,7 +26,7 @@ ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor, c
void ClReshapeWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 4ee6d5e7a5..c4f0a041df 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -38,8 +38,7 @@ ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinea
void ClResizeBilinearFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
- m_ResizeBilinearLayer.run();
+ RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
}
-
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index 606005659f..ed012cc30b 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -27,7 +27,7 @@ ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& des
void ClSoftmaxFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
- m_SoftmaxLayer.run();
+ RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 7e0589e89f..d06306e178 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -36,8 +36,7 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
void ClSoftmaxUint8Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
-
- m_SoftmaxLayer.run();
+ RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 1967fae354..e23dab0f57 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -32,7 +32,7 @@ ClSubtractionWorkload::ClSubtractionWorkload(const SubtractionQueueDescriptor& d
void ClSubtractionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
- m_Layer.run();
+ RunClFunction(m_Layer, CHECK_LOCATION());
}
arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index c765c63dce..ca0de8dd0a 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -10,6 +10,10 @@
#include <backends/cl/OpenClTimer.hpp>
#include <backends/CpuTensorHandle.hpp>
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+#include <sstream>
+
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
name, \
@@ -60,4 +64,24 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
}
};
+inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
+{
+ std::stringstream message;
+ message << "CL error: " << clError.what() << ". Error code: " << clError.err();
+
+ return RuntimeException(message.str(), location);
+}
+
+inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
+{
+ try
+ {
+ function.run();
+ }
+ catch (cl::Error& error)
+ {
+ throw WrapClError(error, location);
+ }
+}
+
} //namespace armnn
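
For reference, a minimal sketch of what this buys calling code. The driver function below is a hypothetical example, not part of this patch; only RunClFunction, CHECK_LOCATION() and armnn::RuntimeException come from the code above, and the message format is the one built by WrapClError.

    #include <iostream>

    // Hypothetical caller: any CL workload's Execute() now funnels OpenCL
    // failures through RunClFunction, so a single catch of
    // armnn::RuntimeException is enough to observe them.
    void ExecuteAndReport(armnn::IWorkload& workload)
    {
        try
        {
            workload.Execute(); // for CL workloads: RunClFunction(m_Layer, CHECK_LOCATION());
        }
        catch (const armnn::RuntimeException& e)
        {
            // e.what() contains "CL error: <message>. Error code: <n>"
            // plus the file/line recorded by CHECK_LOCATION().
            std::cerr << e.what() << std::endl;
            throw; // rethrow so the caller decides how to recover
        }
    }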