ArmNN 22.05
QuantizeTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

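// Shared harness for all of the Quantize layer tests below: it builds a
// Quantize workload from the supplied factory, copies float input data into
// the input tensor handle, executes the workload, and reads the quantized
// result back for comparison against the expected values.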
template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    IgnoreUnused(memoryManager);
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Quantize,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, Dim>(actualOutput,
                                   expectedOutputData,
                                   outputHandle->GetShape(),
                                   outputTensorInfo.GetShape());
}

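// Quantizes a 1x2x2x3 tensor of the values 1.0f..12.0f with scale 0.5 and
// offset (zero point) 1, so each element maps to round(x / 0.5f) + 1.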
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });
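    // Sanity check of the mapping: Quantize(4.0f) = round(4.0f / 0.5f) + 1 = 9,
    // which is the fourth expected element above.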

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  tensorHandleFactory,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

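// Exercises saturation: the tiny scale 0.0001 maps +/-100.0f to values that
// overflow every quantized target type, so the output must clamp to the
// type's lowest and highest representable values.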
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });
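    // -100.0f / 0.0001f = -1,000,000 and 100.0f / 0.0001f = +1,000,000, far
    // outside the range of every 8- and 16-bit quantized type, so the expected
    // output is the type's lowest and highest values.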

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  tensorHandleFactory,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

} // anonymous namespace

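// Exported entry points: each instantiates one of the templates above for a
// concrete quantized output type.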
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory);
}
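
// Usage sketch (an assumption, not part of this file): backend test suites
// register these entry points through their layer-test macros, along the
// lines of what the reference backend's layer tests do:
//
//   ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//   ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeClampInt16,  QuantizeClampInt16Test)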