From ae050524109f1ce827962665436ef7430f2ac479 Mon Sep 17 00:00:00 2001
From: David Monahan
Date: Wed, 22 Mar 2023 16:48:58 +0000
Subject: IVGCVSW-7255 Update Doxygen Documentation and publish on GitHub.

* Updating Doxygen documentation for 23.02 release.

Signed-off-by: David Monahan
Change-Id: I545574ff7664b4595d2fe6a91a3c35d2ad55df82
---
 latest/_neon_minimum_workload_8cpp_source.xhtml | 183 ++++++++++++++++++++++++
 1 file changed, 183 insertions(+)
 create mode 100644 latest/_neon_minimum_workload_8cpp_source.xhtml

(limited to 'latest/_neon_minimum_workload_8cpp_source.xhtml')

diff --git a/latest/_neon_minimum_workload_8cpp_source.xhtml b/latest/_neon_minimum_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..33e6dca5b8
--- /dev/null
+++ b/latest/_neon_minimum_workload_8cpp_source.xhtml
@@ -0,0 +1,183 @@

ArmNN: src/backends/neon/workloads/NeonMinimumWorkload.cpp Source File
NeonMinimumWorkload.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonMinimumWorkload.hpp"

// The remaining includes are rendered as links in the Doxygen listing; the set below is
// reconstructed from the symbols this file uses and may not match the original exactly.
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

namespace armnn
{

arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    return arm_compute::NEElementwiseMin::validate(&aclInput0,
                                                   &aclInput1,
                                                   &aclOutput);
}

NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descriptor,
                                         const WorkloadInfo& info)
    // The base-class initialiser is rendered as a link in the listing; NeonBaseWorkload
    // is the expected parent class for Neon workloads in the 23.02 release.
    : NeonBaseWorkload<MinimumQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonMinimumWorkload", 2, 1);

    arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_MinLayer.configure(&input0, &input1, &output);
}

void NeonMinimumWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonMinimumWorkload_Execute", this->GetGuid());
    m_MinLayer.run();
}

} //namespace armnn
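NeonMinimumWorkloadValidate follows the usual ArmNN backend pattern of asking the Arm Compute Library whether a configuration is supported before the workload is built. The short sketch below is not part of the patch: the helper name, tensor shapes, data type and include path are illustrative assumptions.

// Hypothetical caller: query Neon support for an elementwise Minimum before creating the workload.
// Shapes, data type and the include path are assumptions for illustration only.
#include <armnn/Tensor.hpp>
#include "NeonMinimumWorkload.hpp"   // assumed to be on the include path inside the ArmNN source tree

bool CanRunMinimumOnNeon()
{
    using namespace armnn;

    const TensorInfo input0(TensorShape({2, 4}), DataType::Float32);
    const TensorInfo input1(TensorShape({2, 4}), DataType::Float32);
    const TensorInfo output(TensorShape({2, 4}), DataType::Float32);

    // Forwards to arm_compute::NEElementwiseMin::validate(), so an ACL status object comes back.
    const arm_compute::Status status = NeonMinimumWorkloadValidate(input0, input1, output);
    return status.error_code() == arm_compute::ErrorCode::OK;
}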
Referenced symbols (from the Doxygen cross-references):

- arm::pipe::ProfilingGuid GetGuid() const final - Definition: Workload.hpp:61
- void ValidateInputsOutputs(const std::string& descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
- namespace armnn - Copyright (c) 2021 ARM Limited and Contributors.
- #define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid)
- arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output) - Validate function for validating the inputs and output.
- Status - Definition: Types.hpp:42
- WorkloadInfo - Contains information about TensorInfos of a layer.
- virtual void Execute() const override - Execute the Minimum operation (the underlying ACL call is sketched after this list).
- NeonMinimumWorkload(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) - Create a NeonMinimumWorkload.
- std::vector<ITensorHandle*> m_Inputs
- std::vector<ITensorHandle*> m_Outputs
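Underneath, the workload is a thin wrapper over arm_compute::NEElementwiseMin: the constructor's configure() call binds the ACL tensors and Execute() simply calls run(). The sketch below shows the equivalent standalone Arm Compute Library usage; it is an illustration under assumed shapes and is not ArmNN code.

// Sketch of the ACL function that NeonMinimumWorkload drives; shapes and the fill step are assumptions.
#include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
#include <arm_compute/runtime/Tensor.h>

void RunElementwiseMin()
{
    using namespace arm_compute;

    Tensor input0, input1, output;
    const TensorInfo info(TensorShape(4U, 2U), 1, DataType::F32);
    input0.allocator()->init(info);
    input1.allocator()->init(info);
    output.allocator()->init(info);

    NEElementwiseMin min;
    min.configure(&input0, &input1, &output);   // corresponds to the configure() call in the constructor above

    input0.allocator()->allocate();
    input1.allocator()->allocate();
    output.allocator()->allocate();
    // ... fill the input buffers here ...

    min.run();                                  // corresponds to Execute()
}

What ArmNN adds around this raw ACL call is visible in the listing: input/output count validation, tensor-handle plumbing via PolymorphicDowncast, and the scoped profiling event around run().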