From a8e06ed540a934f966679e1ef1cf7acf295211b3 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Fri, 19 Oct 2018 16:46:15 +0100
Subject: IVGCVSW-1955: Unify backend exceptions (wrap cl::Error)

* Added wrapper function around arm_compute::IFunction::run() that catches
  cl::Error and wraps it into an armnn::RuntimeException
* Added MakeWorkload template inside ClWorkloadFactory that catches
  cl::Error and wraps it into an armnn::RuntimeException
* Replaced cl::Error with armnn::RuntimeException in catch statements
  inside LoadedNetwork

Change-Id: I2340f41ae02b8db1d7ef5157824a50e7410854e3
---
 include/armnn/Exceptions.hpp                       |  5 ++
 src/armnn/LoadedNetwork.cpp                        | 56 +++++++-----------
 src/backends/MakeWorkloadHelper.hpp                | 10 +++-
 src/backends/cl/ClWorkloadFactory.cpp              | 69 ++++++++++++++------
 src/backends/cl/ClWorkloadFactory.hpp              | 10 ++++
 src/backends/cl/workloads/ClActivationWorkload.cpp |  2 +-
 src/backends/cl/workloads/ClAdditionWorkload.cpp   |  2 +-
 .../ClBatchNormalizationFloatWorkload.cpp          |  2 +-
 .../cl/workloads/ClConvertFp16ToFp32Workload.cpp   |  2 +-
 .../cl/workloads/ClConvertFp32ToFp16Workload.cpp   |  2 +-
 .../cl/workloads/ClConvolution2dWorkload.cpp       |  3 +-
 .../workloads/ClDepthwiseConvolutionWorkload.cpp   |  2 +-
 .../cl/workloads/ClDivisionFloatWorkload.cpp       |  4 +-
 src/backends/cl/workloads/ClFloorFloatWorkload.cpp |  2 +-
 .../cl/workloads/ClFullyConnectedWorkload.cpp      |  2 +-
 .../workloads/ClL2NormalizationFloatWorkload.cpp   |  2 +-
 src/backends/cl/workloads/ClLstmFloatWorkload.cpp  |  2 +-
 .../cl/workloads/ClMultiplicationWorkload.cpp      |  4 +-
 .../cl/workloads/ClNormalizationFloatWorkload.cpp  |  2 +-
 src/backends/cl/workloads/ClPadWorkload.cpp        |  2 +-
 src/backends/cl/workloads/ClPermuteWorkload.cpp    |  2 +-
 src/backends/cl/workloads/ClPooling2dWorkload.cpp  |  2 +-
 src/backends/cl/workloads/ClReshapeWorkload.cpp    |  2 +-
 .../cl/workloads/ClResizeBilinearFloatWorkload.cpp |  3 +-
 .../cl/workloads/ClSoftmaxFloatWorkload.cpp        |  2 +-
 .../cl/workloads/ClSoftmaxUint8Workload.cpp        |  3 +-
 .../cl/workloads/ClSubtractionWorkload.cpp         |  2 +-
 src/backends/cl/workloads/ClWorkloadUtils.hpp      | 24 ++++++++
 src/backends/neon/NeonWorkloadFactory.cpp          | 40 ++++++-------
 src/backends/reference/RefWorkloadFactory.cpp      |  4 +-
 30 files changed, 162 insertions(+), 107 deletions(-)
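Reviewer note (not part of the commit): all of the changes below apply one
catch-and-wrap pattern, implemented in ClWorkloadUtils.hpp as
WrapClError()/RunClFunction() and mirrored by ClWorkloadFactory::MakeWorkload.
For orientation, here is a minimal, self-contained sketch of that pattern;
ClError and RunWrapped are hypothetical stand-ins so the snippet builds
without OpenCL headers, whereas the real code uses cl::Error,
armnn::RuntimeException and CHECK_LOCATION().

    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for cl::Error: carries a message plus an
    // OpenCL status code, just like the real exception type.
    class ClError : public std::runtime_error
    {
    public:
        ClError(const std::string& what, int errorCode)
            : std::runtime_error(what), m_ErrorCode(errorCode) {}
        int err() const { return m_ErrorCode; }
    private:
        int m_ErrorCode;
    };

    // Stand-in for armnn::RuntimeException (the real one also records the
    // CheckLocation of the failure).
    class RuntimeException : public std::runtime_error
    {
    public:
        using std::runtime_error::runtime_error;
    };

    // The pattern: run the backend function and translate the
    // backend-specific error into the backend-agnostic exception, keeping
    // the CL error code in the message so no diagnostics are lost.
    template <typename Function>
    void RunWrapped(Function&& function)
    {
        try
        {
            function();
        }
        catch (const ClError& error)
        {
            std::stringstream message;
            message << "CL error: " << error.what() << ". Error code: " << error.err();
            throw RuntimeException(message.str());
        }
    }

With this in place, a call such as
RunWrapped([]{ throw ClError("enqueue failed", -5); }) surfaces as a
RuntimeException whose message still embeds the original CL status code.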
diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp
index 29d874cd05..008617d457 100644
--- a/include/armnn/Exceptions.hpp
+++ b/include/armnn/Exceptions.hpp
@@ -110,6 +110,11 @@ class BadOptionalAccessException : public Exception
     using Exception::Exception;
 };
 
+class RuntimeException : public Exception
+{
+    using Exception::Exception;
+};
+
 template
 void ConditionalThrow(bool condition, const std::string& message)
 {
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 4f73bda832..f49fa7b878 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -11,10 +11,6 @@
 #include "Profiling.hpp"
 #include "HeapProfiling.hpp"
 
-#ifdef ARMCOMPUTECL_ENABLED
-#include
-#endif
-
 #include
 #include
 
@@ -38,15 +34,6 @@ std::string ToErrorMessage(const char * prefix, const ExceptionType & error)
     return ss.str();
 }
 
-#if ARMCOMPUTECL_ENABLED
-std::string ToErrorMessage(const char * prefix, const cl::Error& error)
-{
-    std::stringstream ss;
-    ss << prefix << " " << error.what() << ". CL error code is: " << error.err();
-    return ss.str();
-}
-#endif
-
 } // anonymous
 
 std::unique_ptr LoadedNetwork::MakeLoadedNetwork(std::unique_ptr net,
@@ -54,30 +41,30 @@ std::unique_ptr LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
 {
     std::unique_ptr loadedNetwork;
 
+    auto Fail = [&](const std::exception& error) -> std::unique_ptr
+    {
+        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
+        BOOST_LOG_TRIVIAL(error) << errorMessage;
+
+        return std::unique_ptr();
+    };
+
     try
     {
         loadedNetwork.reset(new LoadedNetwork(std::move(net)));
     }
-    catch (const std::runtime_error& error)
+    catch (const armnn::RuntimeException& error)
     {
-        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
-        BOOST_LOG_TRIVIAL(error) << errorMessage;
-        return std::unique_ptr();
+        return Fail(error);
     }
     catch (const armnn::Exception& error)
     {
-        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
-        BOOST_LOG_TRIVIAL(error) << errorMessage;
-        return std::unique_ptr();
+        return Fail(error);
    }
-#if ARMCOMPUTECL_ENABLED
-    catch (const cl::Error& error)
+    catch (const std::runtime_error& error)
     {
-        errorMessage = ToErrorMessage("A CL error occurred attempting to prepare a network workload: ", error);
-        BOOST_LOG_TRIVIAL(error) << errorMessage;
-        return std::unique_ptr();
+        return Fail(error);
     }
-#endif
 
     return loadedNetwork;
 }
@@ -420,6 +407,12 @@ bool LoadedNetwork::Execute()
     m_CpuAcc.Acquire();
     m_GpuAcc.Acquire();
 
+    auto Fail = [&](const std::exception& error)
+    {
+        BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what();
+        success = false;
+    };
+
     try
     {
         for (size_t i = 0; i < m_WorkloadQueue.size(); ++i)
@@ -427,18 +420,13 @@ bool LoadedNetwork::Execute()
         {
             m_WorkloadQueue[i]->Execute();
         }
     }
-#if ARMCOMPUTECL_ENABLED
-    catch (const cl::Error& error)
+    catch (const RuntimeException& error)
     {
-        BOOST_LOG_TRIVIAL(error) << "A CL error occurred attempting to execute a workload: "
-                                 << error.what() << ". CL error code is: " << error.err();
-        success = false;
+        Fail(error);
     }
-#endif
     catch (const std::runtime_error& error)
     {
-        BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what();
-        success = false;
+        Fail(error);
     }
 
     // Informs the memory managers to release memory in it's respective memory group
diff --git a/src/backends/MakeWorkloadHelper.hpp b/src/backends/MakeWorkloadHelper.hpp
index 281a65a21e..78a9669530 100644
--- a/src/backends/MakeWorkloadHelper.hpp
+++ b/src/backends/MakeWorkloadHelper.hpp
@@ -39,7 +39,9 @@ struct MakeWorkloadForType
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template
-std::unique_ptr MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args)
+std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descriptor,
+                                   const WorkloadInfo& info,
+                                   Args&&... args)
 {
     const DataType dataType = !info.m_InputTensorInfos.empty() ?
         info.m_InputTensorInfos[0].GetDataType()
@@ -67,9 +69,11 @@ std::unique_ptr MakeWorkload(const QueueDescriptorType& descriptor, c
 // FloatWorkload, Uint8Workload>.
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template
-std::unique_ptr MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args)
+std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descriptor,
+                                   const WorkloadInfo& info,
+                                   Args&&... args)
 {
-    return MakeWorkload(descriptor, info,
+    return MakeWorkloadHelper(descriptor, info,
         std::forward(args)...);
 }
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 68d371388c..e1d8314d82 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -16,12 +16,13 @@
 #include
 #include
 
-#include
-
 #include
-#include
 #include
+
+#include
+#include
+#include
 #endif
 
 #include
@@ -42,6 +43,36 @@ bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
 
 #ifdef ARMCOMPUTECL_ENABLED
 
+template
+std::unique_ptr ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+                                                const WorkloadInfo& info,
+                                                Args&&... args)
+{
+    try
+    {
+        return MakeWorkloadHelper(descriptor, info, std::forward(args)...);
+    }
+    catch (const cl::Error& clError)
+    {
+        throw WrapClError(clError, CHECK_LOCATION());
+    }
+}
+
+template
+std::unique_ptr ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+                                                const WorkloadInfo& info,
+                                                Args&&... args)
+{
+    try
+    {
+        return std::make_unique(descriptor, info, std::forward(args)...);
+    }
+    catch (const cl::Error& clError)
+    {
+        throw WrapClError(clError, CHECK_LOCATION());
+    }
+}
+
 ClWorkloadFactory::ClWorkloadFactory()
     : m_MemoryManager(std::make_unique())
 {
@@ -100,26 +131,26 @@ std::unique_ptr ClWorkloadFactory::CreateOutput(const OutputQueueDesc
 std::unique_ptr ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const
 {
     return MakeWorkload(descriptor, info,
-        m_MemoryManager.GetIntraLayerManager());
+                        m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateFullyConnected(
@@ -132,25 +163,25 @@ std::unique_ptr ClWorkloadFactory::CreateFullyConnected(
 std::unique_ptr ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info, m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkload(descriptor, info, m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr ClWorkloadFactory::CreateDepthwiseConvolution2d(
     const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
@@ -162,13 +193,13 @@ std::unique_ptr ClWorkloadFactory::CreateNormalization(const N
 std::unique_ptr ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateMultiplication(
     const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateDivision(
@@ -180,7 +211,7 @@ std::unique_ptr ClWorkloadFactory::CreateDivision(
 std::unique_ptr ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateBatchNormalization(
@@ -223,13 +254,13 @@ std::unique_ptr ClWorkloadFactory::CreateL2Normalization(const L2Norm
 std::unique_ptr ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
@@ -248,14 +279,14 @@ std::unique_ptr ClWorkloadFactory::CreateConvertFp16ToFp32(
     const ConvertFp16ToFp32QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateConvertFp32ToFp16(
     const ConvertFp32ToFp16QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 std::unique_ptr ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
@@ -267,7 +298,7 @@ std::unique_ptr ClWorkloadFactory::CreateMean(const MeanQueueDescript
 std::unique_ptr ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const
 {
-    return std::make_unique(descriptor, info);
+    return MakeWorkload(descriptor, info);
 }
 
 void ClWorkloadFactory::Finalize()
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 9f8ec62db7..66de3a50f1 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -132,6 +132,16 @@ public:
 
 private:
 #ifdef ARMCOMPUTECL_ENABLED
+    template
+    static std::unique_ptr MakeWorkload(const QueueDescriptorType& descriptor,
+                                        const WorkloadInfo& info,
+                                        Args&&... args);
+
+    template
+    static std::unique_ptr MakeWorkload(const QueueDescriptorType& descriptor,
+                                        const WorkloadInfo& info,
+                                        Args&&... args);
+
     mutable ClMemoryManager m_MemoryManager;
 #endif
 };
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 426af9f16d..188ad3283e 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -53,7 +53,7 @@ ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& desc
 void ClActivationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationWorkload_Execute");
-    m_ActivationLayer.run();
+    RunClFunction(m_ActivationLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index c9ac958402..6ec207a956 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -32,7 +32,7 @@ ClAdditionWorkload::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor
 void ClAdditionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 24be7cddca..1f3f9b540a 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -94,7 +94,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
 void ClBatchNormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 2c9a0e1fc2..b489ced066 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -29,7 +29,7 @@ ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
 void ClConvertFp16ToFp32Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp16ToFp32Workload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 6758180a6e..781607f716 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -29,7 +29,7 @@ ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
 void ClConvertFp32ToFp16Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp32ToFp16Workload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 301859ee1b..7c876ab7bb 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -106,8 +106,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
 void ClConvolution2dWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
-
-    m_ConvolutionLayer.run();
+    RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
 
 void ClConvolution2dWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 6fa9ddc6b0..6b159f15e4 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -137,7 +137,7 @@ void ClDepthwiseConvolutionWorkload::Execute() const
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
     BOOST_ASSERT(m_DepthwiseConvolutionLayer);
 
-    m_DepthwiseConvolutionLayer->run();
+    RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
index a2d8534682..324d8bda8a 100644
--- a/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionFloatWorkload.cpp
@@ -40,9 +40,7 @@ ClDivisionFloatWorkload::ClDivisionFloatWorkload(const DivisionQueueDescriptor&
 void ClDivisionFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
-
-    // Executes the layer.
-    m_ArithmeticDivision.run();
+    RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0a60fc3b5c..457d19eafe 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -25,7 +25,7 @@ ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescripto
 void ClFloorFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index b3a97f35f8..7b2ecf0e8d 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -84,7 +84,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(const FullyConnectedQueueDesc
 void ClFullyConnectedWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
-    m_FullyConnectedLayer.run();
+    RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
 }
 
 void ClFullyConnectedWorkload::FreeUnusedTensors()
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f84801601a..0dd0603b54 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -48,7 +48,7 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
 void ClL2NormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index aa7110cad3..177368bdbe 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -217,7 +217,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor,
 void ClLstmFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClLstmFloatWorkload_Execute");
-    m_LstmLayer.run();
+    RunClFunction(m_LstmLayer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 9d23caa695..c0bcdbc4c2 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -52,9 +52,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
 void ClMultiplicationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationWorkload_Execute");
-
-    // Executes the layer.
-    m_PixelWiseMultiplication.run();
+    RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index f6c07e1c7a..f3cc6ec08d 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -49,7 +49,7 @@ ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQu
 void ClNormalizationFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
-    m_NormalizationLayer.run();
+    RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 3e63d5c210..44c0eeab20 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -37,7 +37,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor, const Workloa
 void ClPadWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClPadWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClPadValidate(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index 5dacc83749..39fa56f195 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -45,7 +45,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
 void ClPermuteWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL( GetName() + "_Execute");
-    m_PermuteFunction.run();
+    RunClFunction(m_PermuteFunction, CHECK_LOCATION());
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 68512ff980..b54afd2fa4 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -51,7 +51,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
 void ClPooling2dWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dWorkload_Execute");
-    m_PoolingLayer.run();
+    RunClFunction(m_PoolingLayer, CHECK_LOCATION());
 }
 
 }
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 43a53cb7a1..47cea944d9 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -26,7 +26,7 @@ ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor, c
 void ClReshapeWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 4ee6d5e7a5..c4f0a041df 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -38,8 +38,7 @@ ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinea
 void ClResizeBilinearFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
-    m_ResizeBilinearLayer.run();
+    RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
 }
-
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index 606005659f..ed012cc30b 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -27,7 +27,7 @@ ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& des
 void ClSoftmaxFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
-    m_SoftmaxLayer.run();
+    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 7e0589e89f..d06306e178 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -36,8 +36,7 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
 void ClSoftmaxUint8Workload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
-
-    m_SoftmaxLayer.run();
+    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 1967fae354..e23dab0f57 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -32,7 +32,7 @@ ClSubtractionWorkload::ClSubtractionWorkload(const SubtractionQueueDescriptor& d
 void ClSubtractionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
-    m_Layer.run();
+    RunClFunction(m_Layer, CHECK_LOCATION());
 }
 
 arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index c765c63dce..ca0de8dd0a 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -10,6 +10,10 @@
 #include
 #include
 
+#include
+
+#include
+
 #define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                   name, \
@@ -60,4 +64,24 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
     }
 };
 
+inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
+{
+    std::stringstream message;
+    message << "CL error: " << clError.what() << ". Error code: " << clError.err();
+
+    return RuntimeException(message.str(), location);
+}
+
+inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
+{
+    try
+    {
+        function.run();
+    }
+    catch (cl::Error& error)
+    {
+        throw WrapClError(error, location);
+    }
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index b3e1dd9563..0e069a2f64 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -79,13 +79,13 @@ std::unique_ptr NeonWorkloadFactory::CreateTensorHandle(const Ten
 std::unique_ptr NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -97,8 +97,8 @@ std::unique_ptr NeonWorkloadFactory::CreateActivation(const Activatio
 std::unique_ptr NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info,
-                        m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper(descriptor, info,
+                              m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
@@ -116,8 +116,8 @@ std::unique_ptr NeonWorkloadFactory::CreateMerger(const Merger
 std::unique_ptr NeonWorkloadFactory::CreateFullyConnected(
     const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info,
-                        m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper(descriptor, info,
+                              m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
@@ -148,38 +148,38 @@ std::unique_ptr NeonWorkloadFactory::CreateDepthwiseConvolution2d(
 std::unique_ptr NeonWorkloadFactory::CreateNormalization(
     const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info,
-                        m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper(descriptor, info,
+                              m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateMultiplication(
     const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateDivision(
     const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateSubtraction(
     const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
@@ -190,7 +190,7 @@ std::unique_ptr NeonWorkloadFactory::CreateMemCopy(const MemCo
         throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
     }
 
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateResizeBilinear(
@@ -210,8 +210,8 @@ std::unique_ptr NeonWorkloadFactory::CreateFakeQuantization(
 std::unique_ptr NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info,
-                        m_MemoryManager.GetIntraLayerManager());
+    return MakeWorkloadHelper(descriptor, info,
+                              m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
@@ -229,13 +229,13 @@ std::unique_ptr NeonWorkloadFactory::CreateReshape(const ReshapeQueue
 std::unique_ptr NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreateConvertFp16ToFp32(
@@ -255,13 +255,13 @@ std::unique_ptr NeonWorkloadFactory::CreateConvertFp32ToFp16(
 std::unique_ptr NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 std::unique_ptr NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    return MakeWorkloadHelper(descriptor, info);
 }
 
 void NeonWorkloadFactory::Finalize()
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index b1f9d6c70a..048f6cdcc4 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -18,7 +18,7 @@ template
 RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                  const WorkloadInfo& info) const
 {
-    return armnn::MakeWorkload(descriptor, info);
+    return armnn::MakeWorkloadHelper(descriptor, info);
 }
 
 RefWorkloadFactory::RefWorkloadFactory()
@@ -114,7 +114,7 @@ std::unique_ptr RefWorkloadFactory::CreateFullyConnected(
 std::unique_ptr RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const
 {
-    return armnn::MakeWorkload
+    return MakeWorkloadHelper
         (descriptor, info);
 }
-- 
cgit v1.2.1
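Postscript (reviewer note, not part of the commit): the practical effect of
the unification is that callers only ever see the armnn exception hierarchy,
whether or not a failure originated in OpenCL. A hypothetical caller sketch,
assuming only the armnn::RuntimeException added above; RunOnce and
executeWorkloads are illustrative names, not ArmNN API:

    #include <armnn/Exceptions.hpp>

    #include <functional>
    #include <iostream>

    // Hypothetical helper: executeWorkloads stands in for whatever drives the
    // enqueued workloads (e.g. the Execute() loop in LoadedNetwork).
    bool RunOnce(const std::function<void()>& executeWorkloads)
    {
        try
        {
            executeWorkloads(); // a cl::Error inside arrives here already wrapped
            return true;
        }
        catch (const armnn::RuntimeException& error)
        {
            // One catch clause now covers both generic runtime failures and
            // wrapped OpenCL failures; the CL error code is in the message.
            std::cerr << "Workload execution failed: " << error.what() << std::endl;
            return false;
        }
    }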