path: root/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
author      Keith Davis <keith.davis@arm.com>    2021-07-20 11:25:22 +0100
committer   Keith Davis <keith.davis@arm.com>    2021-08-04 11:49:16 +0100
commit      554fa09a0f3d6c9c572634c9d2de9bfb6c3218b0 (patch)
tree        1820a2cadcc1f34667199acff2d044e5d2083ea2 /src/backends/reference/workloads/RefConvolution2dWorkload.cpp
parent      96fd98c28441618fbdf9376fe46a368ef06b19e1 (diff)
download    armnn-554fa09a0f3d6c9c572634c9d2de9bfb6c3218b0.tar.gz
IVGCVSW-5980 JSON profiling output
* Add new ProfilingDetails class to construct operator details string
* Add new macro which helps append layer details to ostream
* Add ProfilingEnabled to NetworkProperties so that profiling can be realised when loading the network
* Add further optional info to WorkloadInfo specific to convolutions
* Generalise some JsonPrinter functions into JsonUtils for reusability
* Remove explicit enabling of profiling within InferenceModel as it is done when loading network
* Add ProfilingDetails macros to ConvolutionWorkloads for validation

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Ie84bc7dc667e72e6bcb635544f9ead7af1765690
Diffstat (limited to 'src/backends/reference/workloads/RefConvolution2dWorkload.cpp')
-rw-r--r--    src/backends/reference/workloads/RefConvolution2dWorkload.cpp    33
1 file changed, 25 insertions(+), 8 deletions(-)
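For context, the caller-side pattern the commit message describes (enabling profiling when the network is loaded, then reading back the per-workload details) might look roughly like the sketch below. This is not part of the diff; the INetworkProperties argument list and the optNet/netId plumbing are assumptions for illustration and may differ between ArmNN releases.

// Hypothetical caller-side sketch (not part of this commit's diff): enable the
// new profiling output at LoadNetwork time and print the collected details.
#include <armnn/ArmNN.hpp>
#include <iostream>

int main()
{
    using namespace armnn;

    // Create the runtime.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);

    // ... build an INetwork and optimize it for CpuRef into 'optNet' here ...
    // IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef},
    //                                        runtime->GetDeviceSpec());

    // The commit adds a profiling flag to INetworkProperties so profiling is
    // enabled while loading the network. The exact argument list below is an
    // assumption based on the commit message and may differ between releases.
    INetworkProperties networkProperties(/*asyncEnabled=*/false,
                                         MemorySource::Undefined,
                                         MemorySource::Undefined,
                                         /*profilingEnabled=*/true);

    // NetworkId netId;
    // std::string errorMsg;
    // runtime->LoadNetwork(netId, std::move(optNet), errorMsg, networkProperties);

    // After EnqueueWorkload, the details reported by each workload via
    // ARMNN_REPORT_PROFILING_WORKLOAD_DESC can be printed as JSON:
    // runtime->GetProfiler(netId)->Print(std::cout);

    return 0;
}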
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 5ae1af8967..7c331715d8 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -13,18 +13,33 @@
namespace armnn
{
RefConvolution2dWorkload::RefConvolution2dWorkload(
- const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
- : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
+ const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
{
- m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
+ // Construct params for reporting operator details
+ std::string workloadName = "RefConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
+
+ WorkloadInfo detailsInfo;
+ detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
+ detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ if (descriptor.m_Parameters.m_BiasEnabled)
+ {
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ }
+
+ // Report Profiling Details
+ ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+
+ m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
m_FilterShape = rFilterInfo.GetShape();
m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight.get()->Map(true));
- if (descriptor.m_Parameters.m_BiasEnabled)
+ if ( descriptor.m_Parameters.m_BiasEnabled )
{
- m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Bias ));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
@@ -35,13 +50,15 @@ void RefConvolution2dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}
-void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const {
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvolution2dWorkload_Execute");
+void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+{
+ std::string workloadName = "RefConvolutionWorkload_Execute_Guid" + std::to_string(this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, workloadName);
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());