From de36e4a9c299028e792c3a5bd99ad0816d806077 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Fri, 13 Mar 2020 16:26:19 +0000
Subject: IVGCVSW-3726 Upload ArmNN Doxygen files

* Upload current ArmNN Doxygen files

Signed-off-by: Ryan OShea
Change-Id: I8989ed16ee40a99a4495b100bd009cf3e24a7285
---
 Documentation/_neon_timer_test_8cpp.xhtml | 201 ++++++++++++++++++++++++++++++
 1 file changed, 201 insertions(+)
 create mode 100644 Documentation/_neon_timer_test_8cpp.xhtml

diff --git a/Documentation/_neon_timer_test_8cpp.xhtml b/Documentation/_neon_timer_test_8cpp.xhtml
new file mode 100644
index 0000000000..6f70adb731
--- /dev/null
+++ b/Documentation/_neon_timer_test_8cpp.xhtml
@@ -0,0 +1,201 @@

ArmNN: src/backends/neon/test/NeonTimerTest.cpp File Reference
ArmNN 20.02
NeonTimerTest.cpp File Reference
#include "NeonWorkloadFactoryHelper.hpp"
+#include <test/TensorHelpers.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <neon/NeonTimer.hpp>
+#include <neon/NeonWorkloadFactory.hpp>
+#include <backendsCommon/test/LayerTests.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+#include <boost/test/unit_test.hpp>
+#include <cstdlib>
+#include <algorithm>
+

Go to the source code of this file.

Functions
BOOST_AUTO_TEST_CASE (NeonTimerGetName)
BOOST_AUTO_TEST_CASE (NeonTimerMeasure)
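Both entries are Boost.Test cases: BOOST_AUTO_TEST_CASE defines the test body and registers it with the framework in one step, so no manual registration code appears in the file. For orientation only, the sketch below shows the same registration pattern in a self-contained form; the module and suite names are hypothetical and are not taken from NeonTimerTest.cpp.

// Standalone Boost.Test example using the header-only runner.
#define BOOST_TEST_MODULE NeonTimerDocExample      // hypothetical module name
#include <boost/test/included/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(ExampleSuite)                // hypothetical suite name

// The macro expands to a test function that the framework discovers and
// runs automatically, exactly as with the two cases documented on this page.
BOOST_AUTO_TEST_CASE(AdditionWorks)
{
    BOOST_CHECK_EQUAL(2 + 2, 4);
}

BOOST_AUTO_TEST_SUITE_END()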

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/2]

BOOST_AUTO_TEST_CASE (NeonTimerGetName)

Definition at line 31 of file NeonTimerTest.cpp.


References NeonTimer::GetName().

BOOST_AUTO_TEST_CASE(NeonTimerGetName)
{
    NeonTimer neonTimer;
    BOOST_CHECK_EQUAL(neonTimer.GetName(), "NeonKernelTimer");
}

NeonTimer::GetName(): const char* GetName() const override (Definition: NeonTimer.cpp:58)
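For orientation only (this sketch is not part of the documented file): the members exercised by the two tests on this page, Start(), Stop(), GetName() and GetMeasurements(), are enough to instrument an arbitrary piece of NEON work. The helper name and the std::function wrapper below are hypothetical, and NeonTimer and Measurement are assumed to live in the armnn namespace, as the unqualified uses in the test code suggest.

#include <neon/NeonTimer.hpp>

#include <functional>
#include <iostream>

// Runs 'work' under the timer and prints one line per NEON kernel that executed.
void ReportNeonKernelTimes(const std::function<void()>& work)
{
    armnn::NeonTimer timer;   // reports its name as "NeonKernelTimer"
    timer.Start();            // begin capturing per-kernel timings
    work();                   // anything that launches NEON kernels, e.g. workload->Execute()
    timer.Stop();             // finish capturing

    // One Measurement per kernel run between Start() and Stop(); in the test below
    // the names look like "NeonKernelTimer/<index>: <KernelName>".
    for (const armnn::Measurement& m : timer.GetMeasurements())
    {
        std::cout << timer.GetName() << " | " << m.m_Name << " = " << m.m_Value << std::endl;
    }
}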

◆ BOOST_AUTO_TEST_CASE() [2/2]

BOOST_AUTO_TEST_CASE (NeonTimerMeasure)

Definition at line 37 of file NeonTimerTest.cpp.


References BOOST_AUTO_TEST_SUITE_END(), BOOST_CHECK(), armnn::BoundedReLu, CopyDataToITensorHandle(), NeonWorkloadFactory::CreateActivation(), NeonWorkloadFactory::CreateTensorHandle(), armnn::Float32, ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and NeonTimer::Start().

BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
{
    NeonWorkloadFactory workloadFactory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    unsigned int inputWidth = 2000u;
    unsigned int inputHeight = 2000u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1u;

    float upperBound = 1.0f;
    float lowerBound = -1.0f;

    size_t inputSize = inputWidth * inputHeight * inputChannels * inputBatchSize;
    std::vector<float> inputData(inputSize, 0.f);
    std::generate(inputData.begin(), inputData.end(), [](){
        return (static_cast<float>(rand()) / static_cast<float>(RAND_MAX / 3)) + 1.f; });

    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::Float32);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::Float32);

    LayerTestResult<float, 4> result(inputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Setup bounded ReLu
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    NeonTimer neonTimer;
    // Start the timer.
    neonTimer.Start();
    // Execute the workload.
    workload->Execute();
    // Stop the timer.
    neonTimer.Stop();

    std::vector<Measurement> measurements = neonTimer.GetMeasurements();

    BOOST_CHECK(measurements.size() <= 2);
    if (measurements.size() > 1)
    {
        BOOST_CHECK_EQUAL(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
        BOOST_CHECK(measurements[0].m_Value > 0.0);
    }
    std::ostringstream oss;
    oss << "NeonKernelTimer/" << measurements.size() - 1 << ": NEActivationLayerKernel";
    BOOST_CHECK_EQUAL(measurements[measurements.size() - 1].m_Name, oss.str());
    BOOST_CHECK(measurements[measurements.size() - 1].m_Value > 0.0);
}
Referenced declarations:
NeonTimer::Start(): void Start() override (Definition: NeonTimer.cpp:21)
armnn::BoundedReLu: min(a, max(b, input)); ReLu1 & ReLu6.
NeonWorkloadFactory::CreateActivation(): std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const override
ActivationDescriptor::m_A: float. Alpha upper bound value used by the activation functions (BoundedReLu, Linear, TanH). (Definition: Descriptors.hpp:37)
WorkloadInfo: Contains information about inputs and outputs to a layer.
ActivationDescriptor::m_B: float. Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH). (Definition: Descriptors.hpp:39)
ActivationDescriptor::m_Function: ActivationFunction. The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square). (Definition: Descriptors.hpp:35)
NeonWorkloadFactory::CreateTensorHandle(): std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const override
CopyDataToITensorHandle(): void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
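The m_A/m_B descriptions above are exactly what NeonTimerMeasure relies on: for BoundedReLu, m_A is the upper bound and m_B the lower bound, i.e. output = min(m_A, max(m_B, input)). The sketch below fills in such a descriptor on its own; the helper name is hypothetical, and nothing is assumed beyond the ActivationDescriptor fields documented above (declared in the public armnn/Descriptors.hpp header).

#include <armnn/Descriptors.hpp>

// Builds the activation configuration the test uses: clamp the output to [-1, 1].
armnn::ActivationDescriptor MakeBoundedReLuDescriptor()
{
    armnn::ActivationDescriptor activation;
    activation.m_Function = armnn::ActivationFunction::BoundedReLu; // min(m_A, max(m_B, input))
    activation.m_A = 1.0f;    // alpha: upper bound, the test's upperBound
    activation.m_B = -1.0f;   // beta: lower bound, the test's lowerBound
    return activation;
}

In the test itself the same three fields are set through descriptor.m_Parameters, since ActivationQueueDescriptor carries an ActivationDescriptor as its m_Parameters member.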
--
cgit v1.2.1