ArmNN 22.05.01
NeonTimerTest.cpp File Reference

Go to the source code of this file.

Functions

 TEST_SUITE ("NeonTimerInstrument")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "NeonTimerInstrument"  )

Definition at line 28 of file NeonTimerTest.cpp.

References armnn::Activation, ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BoundedReLu, CopyDataToITensorHandle(), NeonWorkloadFactory::CreateTensorHandle(), NeonWorkloadFactory::CreateWorkload(), armnn::Float32, NeonTimer::GetName(), and NeonTimer::Start().

{

TEST_CASE("NeonTimerGetName")
{
    NeonTimer neonTimer;
    CHECK_EQ(std::string(neonTimer.GetName()), "NeonKernelTimer");
}

TEST_CASE("NeonTimerMeasure")
{
    NeonWorkloadFactory workloadFactory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    unsigned int inputWidth = 2000u;
    unsigned int inputHeight = 2000u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1u;

    float upperBound = 1.0f;
    float lowerBound = -1.0f;

    size_t inputSize = inputWidth * inputHeight * inputChannels * inputBatchSize;
    std::vector<float> inputData(inputSize, 0.f);
    std::generate(inputData.begin(), inputData.end(), [](){
        return (static_cast<float>(rand()) / static_cast<float>(RAND_MAX / 3)) + 1.f; });

    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::Float32);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::Float32);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // Setup bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(LayerType::Activation, descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    NeonTimer neonTimer;
    // Start the timer.
    neonTimer.Start();
    // Execute the workload.
    workload->Execute();
    // Stop the timer.
    neonTimer.Stop();

    std::vector<Measurement> measurements = neonTimer.GetMeasurements();

    CHECK(measurements.size() <= 2);
    if (measurements.size() > 1)
    {
        CHECK_EQ(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
        CHECK(measurements[0].m_Value > 0.0);
    }
    std::ostringstream oss_neon;
    std::ostringstream oss_cpu;
    oss_neon << "NeonKernelTimer/" << measurements.size()-1 << ": NEActivationLayerKernel";
    oss_cpu << "NeonKernelTimer/" << measurements.size()-1 << ": CpuActivationKernel";
    bool kernelCheck = ((measurements[measurements.size()-1].m_Name.find(oss_neon.str()) != std::string::npos)
                        || (measurements[measurements.size()-1].m_Name.find(oss_cpu.str()) != std::string::npos));
    CHECK(kernelCheck);
    CHECK(measurements[measurements.size()-1].m_Value > 0.0);
}

}
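
The timing pattern exercised by "NeonTimerMeasure" can be reduced to the following sketch. This is a minimal outline only: it assumes the same includes and using-declarations as the test above, and `workload` stands in for an already-built Neon IWorkload (a placeholder, not part of this file).

    // Sketch of the NeonTimer usage pattern (assumptions noted above).
    NeonTimer neonTimer;

    neonTimer.Start();      // begin capturing ACL kernel timings
    workload->Execute();    // run the Neon workload under measurement
    neonTimer.Stop();       // stop capturing

    // Each Measurement carries a kernel name (m_Name) and a duration value (m_Value).
    for (const Measurement& m : neonTimer.GetMeasurements())
    {
        std::cout << m.m_Name << ": " << m.m_Value << std::endl;
    }

The test asserts on exactly this output: the measurement names are prefixed with "NeonKernelTimer/<index>: " followed by the ACL kernel name, and each recorded duration must be greater than zero.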