From 03c7ff3f6188240baaeaeb405a357a0c58195fec Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Tue, 22 Aug 2023 12:00:04 +0100
Subject: IVGCVSW-7702 Update Doxygen Docu for 23.08

Signed-off-by: Nikhil Raj
Change-Id: I357a9f7e47614589327c1ac5d95b6224ff77103d
---
 latest/_cl_concat_workload_8cpp_source.html | 248 ++++++++++++++++++++++++++++
 1 file changed, 248 insertions(+)
 create mode 100644 latest/_cl_concat_workload_8cpp_source.html

(limited to 'latest/_cl_concat_workload_8cpp_source.html')

diff --git a/latest/_cl_concat_workload_8cpp_source.html b/latest/_cl_concat_workload_8cpp_source.html
new file mode 100644
index 0000000000..3dcf812cc0
--- /dev/null
+++ b/latest/_cl_concat_workload_8cpp_source.html
@@ -0,0 +1,248 @@

Arm NN: src/backends/cl/workloads/ClConcatWorkload.cpp Source File
ClConcatWorkload.cpp
//
// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClConcatWorkload.hpp"
#include "ClWorkloadUtils.hpp"

// Needed for BuildArmComputeTensorInfo and PolymorphicPointerDowncast used below.
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>

#include <arm_compute/core/Types.h>

namespace armnn
{
using namespace armcomputetensorutils;

namespace
{
// Maps the ArmNN concat axis onto the reversed axis ordering used by ACL.
size_t CalcAxis(const OriginsDescriptor& descriptor)
{
    return (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
}
} //namespace
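// Worked example of the mapping above: for a concat over 4-D tensors with
// GetNumDimensions() == 4 and GetConcatAxis() == 1 (the "C" axis in NCHW terms),
// the ACL axis is (4 - 1) - 1 = 2, because ACL counts axes from the
// fastest-varying (innermost) dimension outwards.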

arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                             const TensorInfo& output,
                                             const OriginsDescriptor& descriptor)
{
    std::vector<arm_compute::TensorInfo> aclInputs;
    for (const TensorInfo* input : inputs)
    {
        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
        aclInputs.emplace_back(aclInputInfo);
    }
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
    std::vector<const arm_compute::ITensorInfo*> aclInputPtrs;
    for (arm_compute::ITensorInfo& input : aclInputs)
    {
        aclInputPtrs.emplace_back(&input);
    }

    size_t aclAxis = CalcAxis(descriptor);
    return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
}

ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor,
                                   const WorkloadInfo& info,
                                   const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<ConcatQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClConcatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    bool allInputsAreSubtensors = true;

    // Check that all inputs are sub-tensors
    for (auto input : descriptor.m_Inputs)
    {
        if (!input->GetParent())
        {
            // Non sub-tensor input found so we need to execute the concat function
            allInputsAreSubtensors = false;
            break;
        }
    }

    if (allInputsAreSubtensors)
    {
        // Can skip configuring the concat function since it's not executed
        return;
    }

    std::vector<const arm_compute::ICLTensor *> aclInputs;
    for (auto input : m_Data.m_Inputs)
    {
        arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
        aclInputs.emplace_back(&aclInput);
    }

    arm_compute::ICLTensor& output =
        armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();

    // Create the layer function
    auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConcatWorkload_configure");
        // Configure input and output tensors
        size_t aclAxis = CalcAxis(descriptor.m_Parameters);
        layer->configure(clCompileContext, aclInputs, &output, aclAxis);
    }

    // Prepare
    layer->prepare();
    m_Layer = std::move(layer);
}

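// When every input handle reports a parent (i.e. all inputs are sub-tensors,
// typically carved out of the output tensor by the backend), the constructor
// above returns before creating m_Layer: the inputs already alias the correct
// regions of the output, so Execute() below is deliberately a no-op in that case.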
void ClConcatWorkload::Execute() const
{
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConcatWorkload_Execute");
        m_Layer->run();
    }
}

} //namespace armnn
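For reference, a minimal sketch of how the standalone validation entry point above might be driven. The tensor shapes, data type, axis and the wrapper function CanConcatOnCl are illustrative assumptions only; in ArmNN this function is normally reached through the CL backend's layer-support checks rather than called directly.

// Illustrative only: validate concatenating two [2,3,4,5] Float32 tensors
// along ArmNN axis 1 (which CalcAxis maps to ACL axis 2).
#include "ClConcatWorkload.hpp"
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

bool CanConcatOnCl()
{
    const armnn::TensorInfo input0({2, 3, 4, 5}, armnn::DataType::Float32);
    const armnn::TensorInfo input1({2, 3, 4, 5}, armnn::DataType::Float32);
    const armnn::TensorInfo output({2, 6, 4, 5}, armnn::DataType::Float32);

    // Build the OriginsDescriptor the usual ArmNN way from the input shapes.
    const std::vector<armnn::TensorShape> shapes = { input0.GetShape(), input1.GetShape() };
    const armnn::OriginsDescriptor descriptor =
        armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);

    const arm_compute::Status status =
        armnn::ClConcatWorkloadValidate({ &input0, &input1 }, output, descriptor);

    return status.error_code() == arm_compute::ErrorCode::OK;
}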