ArmNN 22.08
FakeQuantizationTestImpl.hpp File Reference


Functions

LayerTestResult< float, 2 > FakeQuantizationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 

Function Documentation

◆ FakeQuantizationTest()

LayerTestResult<float, 2> FakeQuantizationTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 16 of file FakeQuantizationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::FakeQuantization, armnn::Float32, armnn::IgnoreUnused(), FakeQuantizationDescriptor::m_Max, FakeQuantizationDescriptor::m_Min, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

Referenced by TEST_SUITE().
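
This helper performs no assertions itself; it returns a LayerTestResult that the calling suite compares. In the reference backend's suite it is registered through ArmNN's auto-test macro, roughly as sketched below (suite and macro names as they appear in RefLayerTests.cpp; treat the exact spelling as an assumption and check the suite itself):

TEST_SUITE("RefLayerTests")
{
    // Registers FakeQuantizationTest so it runs against the reference
    // backend's workload factory and tensor-handle factory.
    ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest)
}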

20 {
21  IgnoreUnused(memoryManager);
22  constexpr unsigned int width = 2;
23  constexpr unsigned int height = 3;
24 
25  const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
26 
27  std::vector<float> input =
28  {
29  -10.0f, -5.0f,
30  0.0f, 5.0f,
31  10.0f, 10.0f
32  };
33 
34  std::vector<float> actualOutput(tensorInfo.GetNumElements());
35  std::vector<float> expectedOutput(tensorInfo.GetNumElements());
36 
37  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
38  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
39 
40  armnn::FakeQuantizationQueueDescriptor data;
41  armnn::WorkloadInfo info;
42 
43  AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
44  AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
45 
46  float min = -10.f;
47  float max = 10.f;
48 
49  data.m_Parameters.m_Min = min;
50  data.m_Parameters.m_Max = max;
51 
52  armnn::PassthroughTensorHandle refHandle(tensorInfo, expectedOutput.data());
53  armnn::FakeQuantizationQueueDescriptor refData = data;
54  armnn::WorkloadInfo refInfo = info;
55  SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
56 
57  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FakeQuantization,
58  data,
59  info);
60 
61  inputHandle->Allocate();
62  outputHandle->Allocate();
63 
64  CopyDataToITensorHandle(inputHandle.get(), input.data());
65 
66  workload->PostAllocationConfigure();
67  workload->Execute();
68 
69  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
70 
71  expectedOutput =
72  {
73  0.0f, 63.0f,
74  128.0f, 191.0f,
75  255.0f, 255.0f
76  };
77 
78  return LayerTestResult<float, 2>(actualOutput,
79  expectedOutput,
80  outputHandle->GetShape(),
81  tensorInfo.GetShape());
82 }
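
The expected values above come from mapping the range [m_Min, m_Max] = [-10, 10] affinely onto the 8-bit level range [0, 255] and returning the levels as floats. A minimal standalone sketch of that arithmetic follows (an illustration of the mapping, not the ArmNN reference implementation; exact midpoint rounding is backend-specific):

#include <algorithm>
#include <cmath>
#include <iostream>

// Map value from [min, max] onto the uint8 level range [0, 255].
// With min = -10 and max = 10 the step is 20/255, so -10 -> 0 and
// 10 -> 255; interior values land within one level of the expected
// 63/128/191 (the reference backend's rounding decides the midpoints).
float FakeQuantizeSketch(float value, float min, float max)
{
    const float scale = (max - min) / 255.0f;                // quantization step
    const float level = std::round((value - min) / scale);   // nearest level
    return std::min(std::max(level, 0.0f), 255.0f);          // clamp to uint8 range
}

int main()
{
    for (float v : { -10.0f, -5.0f, 0.0f, 5.0f, 10.0f })
    {
        std::cout << v << " -> " << FakeQuantizeSketch(v, -10.0f, 10.0f) << "\n";
    }
    return 0;
}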