From 8efb48a6847c5cd166c561127ae6611150963ce3 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Fri, 19 May 2023 11:14:28 +0100
Subject: Update Doxygen docu for 23.05

Signed-off-by: Nikhil Raj
Change-Id: I0a992286f14fa68fcc6e5eba31ac39fed003cbbe
---
 .../_cl_multiplication_workload_8cpp_source.xhtml | 232 +++++++++++++++++++++
 1 file changed, 232 insertions(+)
 create mode 100644 23.05/_cl_multiplication_workload_8cpp_source.xhtml

(limited to '23.05/_cl_multiplication_workload_8cpp_source.xhtml')

diff --git a/23.05/_cl_multiplication_workload_8cpp_source.xhtml b/23.05/_cl_multiplication_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..36ac1f03fb
--- /dev/null
+++ b/23.05/_cl_multiplication_workload_8cpp_source.xhtml
@@ -0,0 +1,232 @@

ArmNN: src/backends/cl/workloads/ClMultiplicationWorkload.cpp Source File
ClMultiplicationWorkload.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClMultiplicationWorkload.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>

#include "ClWorkloadUtils.hpp"

namespace armnn
{

arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
                                                     const TensorInfo& input1,
                                                     const TensorInfo& output,
                                                     const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
    const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    auto convertPolicy = (IsQuantizedType(input0.GetDataType()) || IsQuantizedType(input1.GetDataType())) ?
                          arm_compute::ConvertPolicy::SATURATE :
                          arm_compute::ConvertPolicy::WRAP;

    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
            activationDescriptor);

    // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
    // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
    // ignored for F32 tensors.
    return arm_compute::CLPixelWiseMultiplication::validate(&aclInput1,
                                                            &aclInput2,
                                                            &aclOutput,
                                                            1.0f,
                                                            convertPolicy,
                                                            arm_compute::RoundingPolicy::TO_ZERO,
                                                            activationInfo);
}

ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info,
                                                   const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<MultiplicationQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("ClMultiplicationWorkload", 2, 1);

    arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    auto convertPolicy = (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()) ||
                          IsQuantizedType(info.m_InputTensorInfos[1].GetDataType())) ?
                          arm_compute::ConvertPolicy::SATURATE :
                          arm_compute::ConvertPolicy::WRAP;

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMultiplicationWorkload_configure");
        // Construct
        m_PixelWiseMultiplication.configure(clCompileContext,
                                            &input0,
                                            &input1,
                                            &output,
                                            1.0f,
                                            convertPolicy,
                                            arm_compute::RoundingPolicy::TO_NEAREST_EVEN,
                                            activationInfo);
    }
}

void ClMultiplicationWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMultiplicationWorkload_Execute", this->GetGuid());
    RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
}

} //namespace armnn
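The listing above is the verbatim source of the workload. As a side note, a minimal sketch of how the ClMultiplicationWorkloadValidate helper might be exercised is given below; the tensor shapes, the Float32 data type, the absence of a fused activation, and the CheckMultiplicationSupport wrapper are illustrative assumptions and are not part of this patch.

// Hypothetical usage sketch (not part of this patch): querying CL backend support
// for an element-wise multiplication before building the workload.
#include "ClMultiplicationWorkload.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>

void CheckMultiplicationSupport()
{
    using namespace armnn;

    // Assumed shapes and data type, purely for illustration.
    TensorInfo input0(TensorShape({2, 4}), DataType::Float32);
    TensorInfo input1(TensorShape({2, 4}), DataType::Float32);
    TensorInfo output(TensorShape({2, 4}), DataType::Float32);

    // No fused activation in this sketch, so pass nullptr.
    arm_compute::Status status = ClMultiplicationWorkloadValidate(input0, input1, output, nullptr);

    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        // The CL backend cannot run this combination; a caller would fall back or report.
        std::cerr << status.error_description() << std::endl;
    }
}

Because both inputs are Float32 in this sketch, the helper would select arm_compute::ConvertPolicy::WRAP; quantized inputs would select SATURATE, as in the source above.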
Referenced declarations (from the linked Doxygen pages):

arm::pipe::ProfilingGuid GetGuid() const final (Definition: Workload.hpp:61)
void ValidateInputsOutputs(const std::string& descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
#define CHECK_LOCATION() (Definition: Exceptions.hpp:203)
An ActivationDescriptor for the ActivationLayer. (Definition: Descriptors.hpp:36)
ClMultiplicationWorkload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info, const arm_compute::CLCompileContext& clCompileContext)
constexpr bool IsQuantizedType() (Definition: TypesUtils.hpp:298)
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, const ActivationDescriptor* activationDescriptor)
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name) (Definition: Profiling.hpp:220)
void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
Status (Definition: Types.hpp:42)
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
Contains information about TensorInfos of a layer.
std::vector<ITensorHandle*> m_Outputs
#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid)
DataType GetDataType() const (Definition: Tensor.hpp:198)
std::vector<ITensorHandle*> m_Inputs