ArmNN 20.08
FakeQuantizationTestImpl.hpp File Reference


Functions

LayerTestResult< float, 2 > FakeQuantizationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 

Function Documentation

◆ FakeQuantizationTest()

LayerTestResult<float, 2> FakeQuantizationTest ( armnn::IWorkloadFactory & workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager )

Definition at line 16 of file FakeQuantizationTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateFakeQuantization(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), LayerTestResult< T, n >::output, and LayerTestResult< T, n >::outputExpected.
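In the ArmNN tree this helper is registered through each backend's unit-test suite rather than called directly. As a minimal standalone sketch, assuming the reference backend is available (armnn::RefWorkloadFactory and LayerTestResult are real ArmNN types, but this driver function and its include path are hypothetical), it could be exercised like this, with the full definition reproduced below:

    // Hypothetical driver for FakeQuantizationTest; a sketch, not how the
    // ArmNN test suite actually wires it up. The include path depends on
    // your build setup.
    #include <reference/RefWorkloadFactory.hpp>

    void RunFakeQuantizationTestOnRefBackend()
    {
        armnn::RefWorkloadFactory workloadFactory;

        // The test ignores the memory manager (see IgnoreUnused in the
        // definition), so an empty shared pointer is sufficient here.
        armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager;

        LayerTestResult<float, 2> result =
            FakeQuantizationTest(workloadFactory, memoryManager);

        // result.output holds what the workload produced;
        // result.outputExpected holds the reference values baked into the test.
    }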

{
    IgnoreUnused(memoryManager);
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width },
                                       armnn::DataType::Float32);

    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f,  -5.0f,
         0.0f,   5.0f,
        10.0f,  10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    // CreateTensorHandle is deprecated in this release; silence the warning.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    // Quantization range for the layer: [-10, 10] is mapped onto [0, 255].
    float min = -10.f;
    float max =  10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    // Quantized levels for the inputs above, returned as floats.
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
          0.0f,  63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));

    return ret;
}
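The expected values encode an affine mapping of [min, max] = [-10, 10] onto the uint8 range [0, 255], with the quantized integers handed back as floats. A rough way to see where the numbers come from is the nominal mapping below; this is a sketch, not ArmNN's own scale/offset arithmetic, which lands one step lower for the -5.0f entry (63 where plain rounding gives 64):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float min = -10.f;
        const float max =  10.f;
        const float inputs[] = { -10.f, -5.f, 0.f, 5.f, 10.f, 10.f };

        for (float x : inputs)
        {
            // Nominal affine quantization: map [min, max] linearly onto [0, 255].
            float q = std::round((x - min) * 255.f / (max - min));
            q = std::min(std::max(q, 0.f), 255.f);
            std::printf("%6.1f -> %5.1f\n", x, q);
        }
        // Prints 0, 64, 128, 191, 255, 255. The test expects 63.0f for -5.0f,
        // reflecting how the workload's own rounding resolves that input.
        return 0;
    }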