ArmNN 21.02
QuantizeTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

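// Shared implementation: wraps the input data in a tensor, creates a Quantize
// workload through the supplied workload factory, executes it, and returns
// both the actual and the expected quantized output for comparison.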
template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    IgnoreUnused(memoryManager);
    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

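    // Register the input and output tensors with the workload descriptor
    // before asking the factory for the Quantize workload.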
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}

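// Quantizes a {1, 2, 2, 3} Float32 tensor using scale 0.5 and offset 1, so
// each output value is round(x / 0.5) + 1; e.g. 1.0f -> 3 and 12.0f -> 25.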
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  tensorHandleFactory,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

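// Uses a deliberately tiny scale (0.0001) so that -100.0f and 100.0f map to
// -1000000 and 1000000, far outside every quantized type's range, and checks
// that the result is clamped to std::numeric_limits<T>::lowest() and max().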
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  tensorHandleFactory,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

} // anonymous namespace

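// Concrete entry points, one per quantized output type, called by the backend
// unit tests.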
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory);
}
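
How a backend wires these entry points into its test suite lives outside this
file. As a minimal sketch, assuming the ARMNN_AUTO_TEST_CASE_WITH_THF
registration macro that ArmNN's backend layer-test suites use to bind a test
name to one of these functions:

// Hypothetical registration in a backend's layer-test suite; the macro name
// and test-case names here are assumptions, not part of this file.
ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeClampInt16, QuantizeClampInt16Test)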