From 1dc83febfb76d6a770bdf3ba16c4034a970c2320 Mon Sep 17 00:00:00 2001 From: Nikhil Raj Date: Thu, 16 May 2024 09:47:51 +0100 Subject: IVGCVSW-8260 Update Doxygen Docu for 24.05 Signed-off-by: Nikhil Raj Change-Id: If4bc983bf2793a27ded8e26ac2b29523fc1e4711 --- ..._neon_fully_connected_workload_8cpp_source.html | 230 +++++++++++---------- 1 file changed, 116 insertions(+), 114 deletions(-) (limited to 'latest/_neon_fully_connected_workload_8cpp_source.html') diff --git a/latest/_neon_fully_connected_workload_8cpp_source.html b/latest/_neon_fully_connected_workload_8cpp_source.html index 64b97615ea..6e6747a901 100644 --- a/latest/_neon_fully_connected_workload_8cpp_source.html +++ b/latest/_neon_fully_connected_workload_8cpp_source.html @@ -36,7 +36,7 @@ ArmNN
-  24.02 +  24.05
@@ -97,7 +97,7 @@ $(document).ready(function(){initNavTree('_neon_fully_connected_workload_8cpp_so
Go to the documentation of this file.
1 //
-
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
@@ -135,125 +135,126 @@ $(document).ready(function(){initNavTree('_neon_fully_connected_workload_8cpp_so
37  arm_compute::TensorInfo* optionalAclBiases = nullptr;
38  if (descriptor.m_BiasEnabled)
39  {
-
40  ARMNN_ASSERT(biases.has_value());
-
41  aclBiases = BuildArmComputeTensorInfo(biases.value());
-
42  aclBiases.set_are_values_constant(biases.value().IsConstant());
-
43  optionalAclBiases = &aclBiases;
-
44  }
-
45 
-
46  const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
-
47  ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
-
48  return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
-
49  &aclWeights,
-
50  optionalAclBiases,
-
51  &aclOutput,
-
52  fullyConnectedLayerInfo);
-
53 }
-
54 
- -
56  const WorkloadInfo& info,
-
57  ACLMemManagerOnDemand& memoryManager)
- -
59 {
-
60  m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
-
61 
-
62  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-
63  arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-
64  m_WeightsTensorInfo = info.m_InputTensorInfos[1];
-
65  weights.info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
-
66  arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
67  if (m_WeightsTensorInfo.IsConstant())
-
68  {
-
69  // Copy the weights' tensor into arm_compute tensor.
-
70  m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
-
71  BuildArmComputeTensor(*m_WeightsTensor, m_WeightsTensorInfo);
-
72  m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
-
73  }
-
74 
- -
76  {
-
77  // Copy the biases tensor into arm_compute tensor.
-
78  m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
-
79  m_BiasesTensorInfo = info.m_InputTensorInfos[2];
-
80  BuildArmComputeTensor(*m_BiasesTensor, m_BiasesTensorInfo);
-
81  m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
-
82  }
-
83 
-
84  const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
-
85  arm_compute::FullyConnectedLayerInfo fc_info =
- -
87 
-
88  auto layer = std::make_unique<arm_compute::NEFullyConnectedLayer>(memoryManager);
-
89  if (m_WeightsTensorInfo.IsConstant())
-
90  {
-
91  layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
-
92  }
-
93  else
-
94  {
-
95  layer->configure(&input, &weights, m_BiasesTensor.get(), &output, fc_info);
-
96  }
-
97  m_FullyConnectedLayer.reset(layer.release());
-
98 
-
99  // Add details for profiling output
-
100  WorkloadInfo detailsInfo;
-
101 
-
102  detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
-
103  detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
-
104 
-
105  // Report Profiling Details
-
106  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonFullyConnectedWorkload_Construct",
-
107  descriptor.m_Parameters,
-
108  detailsInfo,
-
109  this->GetGuid());
-
110 }
-
111 
- -
113 {
-
114  ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFullyConnectedWorkload_Execute");
-
115  // The constant tensors may not be fully in place until the workload is Executed
-
116  if (!prepared)
-
117  {
-
118  if (m_WeightsTensorInfo.IsConstant())
-
119  {
-
120  InitializeArmComputeTensorData(*m_WeightsTensor, m_WeightsTensorInfo, m_Data.m_Inputs[1]);
-
121  m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
-
122  }
-
123 
- -
125  {
-
126  InitializeArmComputeTensorData(*m_BiasesTensor, m_BiasesTensorInfo, m_Data.m_Inputs[2]);
-
127  m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
-
128  }
-
129  if (m_WeightsTensorInfo.IsConstant())
-
130  {
-
131  FreeTensorIfUnused(m_WeightsTensor);
-
132  }
-
133  if (m_BiasesTensorInfo.IsConstant())
-
134  {
-
135  FreeTensorIfUnused(m_BiasesTensor);
-
136  }
-
137  prepared = true;
-
138  }
-
139  m_FullyConnectedLayer->run();
-
140 }
-
141 
-
142 } //namespace armnn
+ +
41  biases.has_value(),
+
42  "NeonFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
+
43  aclBiases = BuildArmComputeTensorInfo(biases.value());
+
44  aclBiases.set_are_values_constant(biases.value().IsConstant());
+
45  optionalAclBiases = &aclBiases;
+
46  }
+
47 
+
48  const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
+
49  ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
+
50  return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
+
51  &aclWeights,
+
52  optionalAclBiases,
+
53  &aclOutput,
+
54  fullyConnectedLayerInfo);
+
55 }
+
56 
+ +
58  const WorkloadInfo& info,
+
59  ACLMemManagerOnDemand& memoryManager)
+ +
61 {
+
62  m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
+
63 
+
64  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+
65  arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+
66  m_WeightsTensorInfo = info.m_InputTensorInfos[1];
+
67  weights.info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
+
68  arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
69  if (m_WeightsTensorInfo.IsConstant())
+
70  {
+
71  // Copy the weights' tensor into arm_compute tensor.
+
72  m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
+
73  BuildArmComputeTensor(*m_WeightsTensor, m_WeightsTensorInfo);
+
74  m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
+
75  }
+
76 
+ +
78  {
+
79  // Copy the biases tensor into arm_compute tensor.
+
80  m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
+
81  m_BiasesTensorInfo = info.m_InputTensorInfos[2];
+
82  BuildArmComputeTensor(*m_BiasesTensor, m_BiasesTensorInfo);
+
83  m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
+
84  }
+
85 
+
86  const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
87  arm_compute::FullyConnectedLayerInfo fc_info =
+ +
89 
+
90  auto layer = std::make_unique<arm_compute::NEFullyConnectedLayer>(memoryManager);
+
91  if (m_WeightsTensorInfo.IsConstant())
+
92  {
+
93  layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
+
94  }
+
95  else
+
96  {
+
97  layer->configure(&input, &weights, m_BiasesTensor.get(), &output, fc_info);
+
98  }
+
99  m_FullyConnectedLayer.reset(layer.release());
+
100 
+
101  // Add details for profiling output
+
102  WorkloadInfo detailsInfo;
+
103 
+
104  detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
+
105  detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
+
106 
+
107  // Report Profiling Details
+
108  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonFullyConnectedWorkload_Construct",
+
109  descriptor.m_Parameters,
+
110  detailsInfo,
+
111  this->GetGuid());
+
112 }
+
113 
+ +
115 {
+
116  ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFullyConnectedWorkload_Execute");
+
117  // The constant tensors may not be fully in place until the workload is Executed
+
118  if (!prepared)
+
119  {
+
120  if (m_WeightsTensorInfo.IsConstant())
+
121  {
+
122  InitializeArmComputeTensorData(*m_WeightsTensor, m_WeightsTensorInfo, m_Data.m_Inputs[1]);
+
123  m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
+
124  }
+
125 
+ +
127  {
+
128  InitializeArmComputeTensorData(*m_BiasesTensor, m_BiasesTensorInfo, m_Data.m_Inputs[2]);
+
129  m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
+
130  }
+
131  if (m_WeightsTensorInfo.IsConstant())
+
132  {
+
133  FreeTensorIfUnused(m_WeightsTensor);
+
134  }
+
135  if (m_BiasesTensorInfo.IsConstant())
+
136  {
+
137  FreeTensorIfUnused(m_BiasesTensor);
+
138  }
+
139  prepared = true;
+
140  }
+
141  m_FullyConnectedLayer->run();
+
142 }
+
143 
+
144 } //namespace armnn
-
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
A FullyConnectedDescriptor for the FullyConnectedLayer.
std::shared_ptr< arm_compute::MemoryManagerOnDemand > ACLMemManagerOnDemand
-
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
+
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
- -
bool IsConstant() const
Definition: Tensor.cpp:509
-
NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager)
-
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)
+ +
bool IsConstant() const
Definition: Tensor.cpp:513
+
NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager)
+
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)
std::vector< TensorInfo > m_OutputTensorInfos
bool m_BiasEnabled
Enable/disable bias.
@@ -269,20 +270,21 @@ $(document).ready(function(){initNavTree('_neon_fully_connected_workload_8cpp_so
std::vector< TensorInfo > m_InputTensorInfos
-
uint32_t GetNumInputs() const
Get the number of inputs.
+
uint32_t GetNumInputs() const
Get the number of inputs.
Copyright (c) 2021 ARM Limited and Contributors.
-
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
+
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor &fullyConnectedDesc, const ActivationDescriptor *activationDesc)
std::vector< ITensorHandle * > m_Inputs
bool has_value() const noexcept
Definition: Optional.hpp:53
+
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
Definition: Exceptions.hpp:210