ArmNN 20.08
PreluTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<typename FactoryType , armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > PreluTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 

Function Documentation

◆ PreluTest()

LayerTestResult<T, 4> PreluTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager 
)

Definition at line 24 of file PreluTestImpl.hpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreatePrelu(), armnn::IgnoreUnused(), LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().
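PreluTest() is a function template parameterised on the backend workload factory type and the data type under test; T resolves to the matching C++ type. A minimal invocation sketch, assuming the reference backend (the direct factory construction, the include path and the null memory manager are illustrative simplifications; the backend test suites normally drive this through their own test-case macros and helpers):

#include <reference/RefWorkloadFactory.hpp> // include path is an assumption

// Run the PReLU layer test against the reference backend with float32 data.
armnn::RefWorkloadFactory workloadFactory;
armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager = nullptr;

LayerTestResult<float, 4> result =
    PreluTest<armnn::RefWorkloadFactory, armnn::DataType::Float32>(workloadFactory, memoryManager);

// result.output holds the backend output; result.outputExpected holds the reference values.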

27 {
28  IgnoreUnused(memoryManager);
29 
30  armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
31  armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
32  armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType);
33 
34  if (armnn::IsQuantizedType<T>())
35  {
36  inputTensorInfo.SetQuantizationScale(0.25f);
37  inputTensorInfo.SetQuantizationOffset(128);
38  alphaTensorInfo.SetQuantizationScale(0.25f);
39  alphaTensorInfo.SetQuantizationOffset(50);
40  outputTensorInfo.SetQuantizationScale(0.5f);
41  outputTensorInfo.SetQuantizationOffset(120);
42  }
43 
44  std::vector<float> inputData
45  {
46  // Expected quantized values:
47  // 128, 128, 128, 132, 132, 132, 124, 124, 124, 120, 120, 120
48  0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -2.0f, -2.0f, -2.0f
49  };
50  std::vector<float> alphaData
51  {
52  // Expected quantized values:
53  // 50, 54, 58
54  0.0f, 1.0f, 2.0f
55  };
56  std::vector<float> outputExpectedData =
57  {
58  // Expected quantized values:
59  // 120, 120, 120, 122, 122, 122, 120, 118, 116, 120, 116, 112
60  0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f
61  };
62 
63  auto input = MakeTensor<T, 4>(inputTensorInfo,
64  armnnUtils::QuantizedVector<T>(inputData,
65  inputTensorInfo.GetQuantizationScale(),
66  inputTensorInfo.GetQuantizationOffset()));
67 
68  auto alpha = MakeTensor<T, 4>(alphaTensorInfo,
69  armnnUtils::QuantizedVector<T>(alphaData,
70  alphaTensorInfo.GetQuantizationScale(),
71  alphaTensorInfo.GetQuantizationOffset()));
72 
73  LayerTestResult<T, 4> result(outputTensorInfo);
74  result.outputExpected =
75  MakeTensor<T, 4>(outputTensorInfo,
76  armnnUtils::QuantizedVector<T>(outputExpectedData,
77  outputTensorInfo.GetQuantizationScale(),
78  outputTensorInfo.GetQuantizationOffset()));
79 
80  auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
81  std::unique_ptr <armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
82  std::unique_ptr <armnn::ITensorHandle> alphaHandle = tensorHandleFactory.CreateTensorHandle(alphaTensorInfo);
83  std::unique_ptr <armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
84 
85  armnn::PreluQueueDescriptor descriptor;
86  armnn::WorkloadInfo info;
87  AddInputToWorkload (descriptor, info, inputTensorInfo, inputHandle.get());
88  AddInputToWorkload (descriptor, info, alphaTensorInfo, alphaHandle.get());
89  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
90 
91  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePrelu(descriptor, info);
92 
93  inputHandle->Allocate();
94  alphaHandle->Allocate();
95  outputHandle->Allocate();
96 
97  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
98  CopyDataToITensorHandle(alphaHandle.get(), &alpha[0][0][0][0]);
99 
100  workload->Execute();
101 
102  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
103 
104  return result;
105 }
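The expected outputs follow the PReLU definition f(x) = max(0, x) + alpha * min(0, x), with the { 1, 1, 1, 3 } alpha tensor broadcast over the { 1, 2, 2, 3 } NHWC input, i.e. one alpha value per channel. A minimal float reference of that computation, illustrative only and not part of the test:

#include <algorithm>
#include <vector>

// Reference PReLU for the float test data above: alpha holds one value per
// channel (innermost dimension, size 3) and is broadcast over the rest.
std::vector<float> ReferencePrelu(const std::vector<float>& input,
                                  const std::vector<float>& alpha)
{
    std::vector<float> output(input.size());
    for (size_t i = 0; i < input.size(); ++i)
    {
        const float a = alpha[i % alpha.size()]; // channel-wise broadcast
        output[i] = std::max(0.0f, input[i]) + a * std::min(0.0f, input[i]);
    }
    return output;
}

// Applied to inputData and alphaData, this reproduces outputExpectedData;
// e.g. the last element: max(0, -2) + 2 * min(0, -2) = -4.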
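For the quantized data types, the "Expected quantized values" comments follow the affine scheme q = round(x / scale) + offset, using the per-tensor parameters set at the top of the test. For example, the input value -2.0f with scale 0.25 and offset 128 maps to -2.0 / 0.25 + 128 = 120; the alpha value 2.0f with scale 0.25 and offset 50 maps to 8 + 50 = 58; and the output value -4.0f with scale 0.5 and offset 120 maps to -8 + 120 = 112.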