From 1dc83febfb76d6a770bdf3ba16c4034a970c2320 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Thu, 16 May 2024 09:47:51 +0100
Subject: IVGCVSW-8260 Update Doxygen Docu for 24.05

Signed-off-by: Nikhil Raj
Change-Id: If4bc983bf2793a27ded8e26ac2b29523fc1e4711
---
 latest/classarmnn_1_1_neon_splitter_workload.html | 136 +++++++++++-----------
 1 file changed, 68 insertions(+), 68 deletions(-)

(limited to 'latest/classarmnn_1_1_neon_splitter_workload.html')

diff --git a/latest/classarmnn_1_1_neon_splitter_workload.html b/latest/classarmnn_1_1_neon_splitter_workload.html
index af83336ffc..e9132ba9fa 100644
--- a/latest/classarmnn_1_1_neon_splitter_workload.html
+++ b/latest/classarmnn_1_1_neon_splitter_workload.html
@@ -36,7 +36,7 @@ ArmNN
-  24.02
+  24.05
@@ -199,62 +199,62 @@ Additional Inherited Members
-
-Definition at line 56 of file NeonSplitterWorkload.cpp.
-
-57  : NeonBaseWorkload<SplitterQueueDescriptor>(descriptor, info)
-58 {
-59  // Report Profiling Details
-60  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSplitterWorkload_Construct",
-61  descriptor.m_Parameters,
-62  info,
-63  this->GetGuid());
-64 
-65  bool allOutputsAreSubtensors = true;
-66 
-67  // Check that all outputs are sub-tensors
-68  for (auto output : m_Data.m_Outputs)
-69  {
-70  if (output && !output->GetParent())
-71  {
-72  // Non sub-tensor input found so we need to execute the split function
-73  allOutputsAreSubtensors = false;
-74  break;
-75  }
-76  }
-77 
-78  if (allOutputsAreSubtensors)
-79  {
-80  // Can skip configuring the split function since it's not executed
-81  return;
-82  }
-83 
-84  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-85 
-86  std::vector<arm_compute::ITensor *> aclOutputs;
-87  for (auto output : m_Data.m_Outputs)
-88  {
-89  arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor();
-90  aclOutputs.emplace_back(&aclOutput);
-91  }
-92 
-93  // Create the layer function
-94  std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit());
-95 
-96  // Configure input and output tensors
-97  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
-98  if (splitAxis.size() != 1)
-99  {
-100  throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
-101  }
-102 
-103  unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
-104  layer->configure(&input, aclOutputs, aclAxis);
-105 
-106  // Prepare
-107  layer->prepare();
-108  m_Layer.reset(layer.release());
-109 }
+
+Definition at line 57 of file NeonSplitterWorkload.cpp.
+
+58  : NeonBaseWorkload<SplitterQueueDescriptor>(descriptor, info)
+59 {
+60  // Report Profiling Details
+61  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSplitterWorkload_Construct",
+62  descriptor.m_Parameters,
+63  info,
+64  this->GetGuid());
+65 
+66  bool allOutputsAreSubtensors = true;
+67 
+68  // Check that all outputs are sub-tensors
+69  for (auto output : m_Data.m_Outputs)
+70  {
+71  if (output && !output->GetParent())
+72  {
+73  // Non sub-tensor input found so we need to execute the split function
+74  allOutputsAreSubtensors = false;
+75  break;
+76  }
+77  }
+78 
+79  if (allOutputsAreSubtensors)
+80  {
+81  // Can skip configuring the split function since it's not executed
+82  return;
+83  }
+84 
+85  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+86 
+87  std::vector<arm_compute::ITensor *> aclOutputs;
+88  for (auto output : m_Data.m_Outputs)
+89  {
+90  arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor();
+91  aclOutputs.emplace_back(&aclOutput);
+92  }
+93 
+94  // Create the layer function
+95  std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit());
+96 
+97  // Configure input and output tensors
+98  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
+99  if (splitAxis.size() != 1)
+100  {
+101  throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
+102  }
+103 
+104  unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
+105  layer->configure(&input, aclOutputs, aclAxis);
+106 
+107  // Prepare
+108  layer->prepare();
+109  m_Layer.reset(layer.release());
+110 }
-
-References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, armnn::ComputeSplitAxis(), ViewsDescriptor::GetNumDimensions(), armnn::info, BaseWorkload< SplitterQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.
-
+
+References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, armnn::ComputeSplitAxis(), ViewsDescriptor::GetNumDimensions(), armnn::info, BaseWorkload< SplitterQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.
+
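Note: the constructor body is identical in the 24.02 and 24.05 renderings above; only the quoted source line numbers shift by one. Its key step is deriving a single split axis (new line 98) and throwing when none can be found (new line 101). The self-contained C++ sketch below illustrates that idea with plain standard-library types; DeriveSplitAxes is an invented name, not ArmNN's ComputeSplitAxis, and it merely mirrors the notion that the split axis is the one dimension along which an output view's extent differs from the input shape.

#include <array>
#include <iostream>
#include <set>
#include <stdexcept>
#include <vector>

// Hypothetical helper (not the ArmNN API): collect every dimension in which a
// view's extent differs from the input shape; a valid split has exactly one.
std::set<unsigned int> DeriveSplitAxes(const std::array<unsigned int, 4>& inputShape,
                                       const std::vector<std::array<unsigned int, 4>>& viewShapes)
{
    std::set<unsigned int> axes;
    for (const auto& view : viewShapes)
    {
        for (unsigned int dim = 0; dim < inputShape.size(); ++dim)
        {
            if (view[dim] != inputShape[dim])
            {
                axes.insert(dim); // this dimension is being split
            }
        }
    }
    return axes;
}

int main()
{
    // Splitting a [1, 4, 2, 2] tensor into two [1, 2, 2, 2] views: only dimension 1 differs.
    const std::array<unsigned int, 4> input{1, 4, 2, 2};
    const std::vector<std::array<unsigned int, 4>> views = { {{1, 2, 2, 2}}, {{1, 2, 2, 2}} };

    const std::set<unsigned int> axes = DeriveSplitAxes(input, views);
    if (axes.size() != 1)
    {
        // Mirrors the guard at new line 99 of the constructor above.
        throw std::invalid_argument("Cannot derive a single split axis");
    }
    std::cout << "Split axis: " << *axes.begin() << "\n"; // prints "Split axis: 1"
    return 0;
}

The same kind of check is what lets the workload reject a SplitterDescriptor whose views differ from the input in zero or several dimensions.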
@@ -284,16 +284,16 @@ Additional Inherited Members

Implements IWorkload.

-
-Definition at line 111 of file NeonSplitterWorkload.cpp.
-
-112 {
-113  if (m_Layer)
-114  {
-115  ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSplitterWorkload_Execute");
-116  m_Layer->run();
-117  }
-118 }
+
+Definition at line 112 of file NeonSplitterWorkload.cpp.
+
+113 {
+114  if (m_Layer)
+115  {
+116  ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSplitterWorkload_Execute");
+117  m_Layer->run();
+118  }
+119 }
-
-References ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID.
-
+
+References ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID.
+
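Execute() (old lines 112-118, new lines 113-119 above) only runs the split function when the constructor actually configured one; when every output was a sub-tensor of the input, m_Layer is never set and Execute() is a no-op. A minimal sketch of that pattern, using invented stand-in types rather than the real ArmNN and arm_compute classes:

#include <iostream>
#include <memory>

// Stand-in for arm_compute::NESplit in this sketch.
struct FakeSplitFunction
{
    void run() { std::cout << "running split\n"; }
};

struct SplitterWorkloadSketch
{
    // Left empty when every output is already a sub-tensor of the input.
    std::unique_ptr<FakeSplitFunction> m_Layer;

    void Execute() const
    {
        if (m_Layer) // nothing to do when the split is realised purely through sub-tensor views
        {
            m_Layer->run();
        }
    }
};

int main()
{
    SplitterWorkloadSketch aliased;   // all outputs were sub-tensors: Execute() is a no-op
    aliased.Execute();

    SplitterWorkloadSketch copying;   // a real split function was configured
    copying.m_Layer = std::make_unique<FakeSplitFunction>();
    copying.Execute();                // prints "running split"
    return 0;
}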
@@ -303,17 +303,17 @@ Additional Inherited Members
-std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
std::vector< ITensorHandle * > m_Outputs
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
Definition: Profiling.hpp:227
-#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
-Creates a profiling event that uses GetGuid() and GetName() from the calling class.
+std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
+Calculates the axis values for split operation.
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
+Creates a profiling event that uses GetGuid() and GetName() from the calling class.
std::vector< ITensorHandle * > m_Inputs
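Not shown on this page is the body of CalcAclAxis, which converts the derived ArmNN split axis before layer->configure(&input, aclOutputs, aclAxis) at new line 105. Assuming the usual convention that the Arm Compute Library numbers tensor dimensions in the reverse order of ArmNN, the mapping would be (numDimensions - armnnAxis) - 1; the hypothetical sketch below (MapToAclAxis is an invented name) shows that mapping.

#include <cassert>

// Hypothetical mapping, assuming ACL dimension order is the reverse of ArmNN's:
// ArmNN axis a in an N-dimensional tensor corresponds to ACL axis (N - a) - 1.
unsigned int MapToAclAxis(unsigned int numDimensions, unsigned int armnnAxis)
{
    return (numDimensions - armnnAxis) - 1;
}

int main()
{
    // Splitting a 4D tensor along ArmNN axis 1 would configure the ACL function on axis 2.
    assert(MapToAclAxis(4, 1) == 2);
    // The innermost ArmNN axis (3) maps to ACL axis 0.
    assert(MapToAclAxis(4, 3) == 0);
    return 0;
}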