author    Keith Davis <keith.davis@arm.com>  2021-08-04 10:35:20 +0100
committer KeithARM <keith.davis@arm.com>     2021-08-05 08:11:06 +0000
commit    5a64f22101ecdda4846e9d71428633f3ccd56fb2
tree      5d5dcb617bf2c2786b37a7c64bb6c54ca5696914 /src/backends
parent    8c999dfeeca7b02a6ea1d0cdcd8c34472f6c9cce
IVGCVSW-5980 Add Descriptor, TensorInfo and Convolution algorithm to JSON
* Add GUID as field to layer details and profiling events
* Add Optional GUID param to existing tests
* Improve Details macro to be inline function
* Fix some formatting
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I66f192a90a7642b3ee8e7dda0d3f428cce002581
Diffstat (limited to 'src/backends')
7 files changed, 67 insertions, 53 deletions
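
The pattern applied in each backend is the same: the workload constructor fills a WorkloadInfo with the input/output/weights tensor infos and the convolution algorithm string, then hands the workload name, descriptor parameters, details and GUID to the reporting macro. A minimal sketch of that flow follows, assuming the types and macros available inside src/backends; the free function ReportConvolution2dDetails, the generic GUID parameter and the name string "Convolution2dWorkload_Execute" are illustrative only and not part of the patch.

// Sketch only: assumes WorkloadInfo, Optional, ARMNN_REPORT_PROFILING_WORKLOAD_DESC and the
// GetConvolutionMethodString helper added by this patch are in scope.
template <typename ProfilingGuidT>   // GUID type left generic; the workload's GetGuid() supplies it
void ReportConvolution2dDetails(const armnn::Convolution2dQueueDescriptor& descriptor,
                                const armnn::WorkloadInfo& info,
                                arm_compute::ConvolutionMethod method,
                                ProfilingGuidT guid)
{
    armnn::WorkloadInfo detailsInfo;
    detailsInfo.m_InputTensorInfos  = info.m_InputTensorInfos;
    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
    detailsInfo.m_WeightsTensorInfo =
        armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());

    // The algorithm name now comes from the shared inline helper rather than a
    // per-workload member function.
    detailsInfo.m_ConvolutionMethod =
        armnn::Optional<std::string>(armnn::GetConvolutionMethodString(method));

    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        detailsInfo.m_BiasTensorInfo =
            armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
    }

    // The GUID is passed as its own argument instead of being appended to the name string.
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("Convolution2dWorkload_Execute",
                                         descriptor.m_Parameters,
                                         detailsInfo,
                                         guid);
}

In the patch itself no such helper exists; each CL, Neon and reference workload constructor inlines this sequence directly, as the diffs below show.
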
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index b3df7ce0b1..ab9d5bcbd2 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -70,7 +70,6 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
     : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
     , m_ConvolutionLayer(memoryManager)
 {
-    // todo: check tensor shapes match.
     const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
 
     m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
@@ -121,21 +120,22 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
                                 isFastMathEnabled);
 
     // Add details for profiling output
-    std::string workloadName = "ClConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-
     WorkloadInfo detailsInfo;
 
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
     detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
     detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
-    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString());
+    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString(m_ConvolutionMethod));
     if (descriptor.m_Parameters.m_BiasEnabled)
     {
         detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
     }
 
     // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClConvolution2dWorkload_Execute_Guid",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 
     InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);
@@ -152,7 +152,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
 
 void ClConvolution2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution2dWorkload_Execute", this->GetGuid());
     RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
 
@@ -161,23 +161,6 @@ arm_compute::ConvolutionMethod ClConvolution2dWorkload::GetConvolutionMethod() c
     return m_ConvolutionMethod;
 }
 
-std::string ClConvolution2dWorkload::GetConvolutionMethodString()
-{
-    switch ( m_ConvolutionMethod )
-    {
-        case arm_compute::ConvolutionMethod::FFT:
-            return "FFT";
-        case arm_compute::ConvolutionMethod::DIRECT:
-            return "Direct";
-        case arm_compute::ConvolutionMethod::GEMM:
-            return "GEMM";
-        case arm_compute::ConvolutionMethod::WINOGRAD:
-            return "Winograd";
-        default:
-            return "Unknown";
-    }
-}
-
 void ClConvolution2dWorkload::FreeUnusedTensors()
 {
     FreeTensorIfUnused(m_KernelTensor);
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 49d7f773df..d0f7a5b251 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -37,7 +37,6 @@ public:
     void Execute() const override;
 
     arm_compute::ConvolutionMethod GetConvolutionMethod() const;
-    std::string GetConvolutionMethodString();
 
 private:
     mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 467505d55b..41b97c1e16 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -19,6 +19,14 @@
 #define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
+                                                  armnn::EmptyOptional(), \
+                                                  name, \
+                                                  armnn::OpenClTimer(), \
+                                                  armnn::WallClockTimer())
+
+#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
+                                                  guid, \
                                                   name, \
                                                   armnn::OpenClTimer(), \
                                                   armnn::WallClockTimer())
@@ -26,6 +34,23 @@
 namespace armnn
 {
 
+inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
+{
+    switch (convolutionMethod)
+    {
+        case arm_compute::ConvolutionMethod::FFT:
+            return "FFT";
+        case arm_compute::ConvolutionMethod::DIRECT:
+            return "Direct";
+        case arm_compute::ConvolutionMethod::GEMM:
+            return "GEMM";
+        case arm_compute::ConvolutionMethod::WINOGRAD:
+            return "Winograd";
+        default:
+            return "Unknown";
+    }
+}
+
 template <typename T>
 void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcData)
 {
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 1e12e13357..a6ae99b481 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -119,21 +119,22 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
                                 isFastMathEnabled);
 
     // Add details for profiling output
-    std::string workloadName = "NeonConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-
     WorkloadInfo detailsInfo;
 
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
     detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
     detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
-    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString());
+    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString(m_ConvolutionMethod));
     if (descriptor.m_Parameters.m_BiasEnabled)
     {
         detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
     }
 
     // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonConvolution2dWorkload_Execute",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
@@ -152,7 +153,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
 
 void NeonConvolution2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvolution2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution2dWorkload_Execute", this->GetGuid());
     m_ConvolutionLayer->run();
 }
 
@@ -161,23 +162,6 @@ arm_compute::ConvolutionMethod NeonConvolution2dWorkload::GetConvolutionMethod()
     return m_ConvolutionMethod;
 }
 
-std::string NeonConvolution2dWorkload::GetConvolutionMethodString()
-{
-    switch ( m_ConvolutionMethod )
-    {
-        case arm_compute::ConvolutionMethod::FFT:
-            return "FFT";
-        case arm_compute::ConvolutionMethod::DIRECT:
-            return "Direct";
-        case arm_compute::ConvolutionMethod::GEMM:
-            return "GEMM";
-        case arm_compute::ConvolutionMethod::WINOGRAD:
-            return "Winograd";
-        default:
-            return "Unknown";
-    }
-}
-
 void NeonConvolution2dWorkload::FreeUnusedTensors()
 {
     FreeTensorIfUnused(m_KernelTensor);
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
index 4b4c07ae87..4b6e58ce41 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
@@ -37,7 +37,6 @@ public:
     void Execute() const override;
 
     arm_compute::ConvolutionMethod GetConvolutionMethod() const;
-    std::string GetConvolutionMethodString();
 
 private:
     std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index ab7616fbe2..1199f30863 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -16,6 +16,14 @@
 #define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+                                                  armnn::EmptyOptional(), \
+                                                  name, \
+                                                  armnn::NeonTimer(), \
+                                                  armnn::WallClockTimer())
+
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+                                                  guid, \
                                                   name, \
                                                   armnn::NeonTimer(), \
                                                   armnn::WallClockTimer())
@@ -25,6 +33,23 @@
 using namespace armnn::armcomputetensorutils;
 
 namespace armnn
 {
 
+inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
+{
+    switch (convolutionMethod)
+    {
+        case arm_compute::ConvolutionMethod::FFT:
+            return "FFT";
+        case arm_compute::ConvolutionMethod::DIRECT:
+            return "Direct";
+        case arm_compute::ConvolutionMethod::GEMM:
+            return "GEMM";
+        case arm_compute::ConvolutionMethod::WINOGRAD:
+            return "Winograd";
+        default:
+            return "Unknown";
+    }
+}
+
 template <typename T>
 void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
 {
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 7c331715d8..b0b88b18db 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -16,9 +16,6 @@ RefConvolution2dWorkload::RefConvolution2dWorkload(
     const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
     : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
 {
-    // Construct params for reporting operator details
-    std::string workloadName = "RefConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-
     WorkloadInfo detailsInfo;
 
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
     detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
@@ -29,7 +26,10 @@ RefConvolution2dWorkload::RefConvolution2dWorkload(
     }
 
     // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("RefConvolution2dWorkload_Execute",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 
     m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
     const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
@@ -57,8 +57,7 @@ void RefConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDesc
 
 void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    std::string workloadName = "RefConvolutionWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, workloadName);
+    ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution2dWorkload_Execute", this->GetGuid());
 
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
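
The scoped-event side of the change is mechanical: the existing per-backend macros now pass armnn::EmptyOptional() in the new GUID slot, while the *_GUID variants forward the caller's GUID. Expanded by hand from the macro definition in ClWorkloadUtils.hpp above, the CL call in Execute() is equivalent to the following sketch:

// Hand-expansion of ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution2dWorkload_Execute", this->GetGuid()),
// per the updated macro: compute device, GUID, event name, then the instruments.
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc,
                                              this->GetGuid(),
                                              "ClConvolution2dWorkload_Execute",
                                              armnn::OpenClTimer(),
                                              armnn::WallClockTimer());

The Neon variant is identical apart from armnn::Compute::CpuAcc and armnn::NeonTimer(), which is why the GUID could be threaded through all backends without changing the underlying instrumentation.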