ArmNN 20.05
QuantizeTestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    IgnoreUnused(memoryManager);
    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}
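
// For reference when reading the expected values in the tests below: ArmNN's
// quantization follows the usual affine scheme, so the expected outputs can be
// derived by hand (a sketch of the mapping, not a restatement of the backend
// implementation):
//
//     q = clamp(round(x / scale) + offset,
//               std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max())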

template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}
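
// Worked example for the values above: with scale = 0.5f and offset = 1 the
// mapping is q = round(x / 0.5) + 1 = 2x + 1, so 1.0f -> 3, 2.0f -> 5, ...,
// 12.0f -> 25, matching expectedOutputData.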

template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}
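
// Worked example: with scale = 0.0001f and offset = 0, the inputs -100.0f and
// 100.0f map to -1,000,000 and +1,000,000. Both lie outside the representable
// range of every target type (e.g. [-128, 127] for QSymmS8), so the outputs
// saturate to std::numeric_limits<T>::lowest() and std::numeric_limits<T>::max().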

} // anonymous namespace

LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
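
// These factories are not test cases by themselves; a backend's test suite
// registers them, typically via the ARMNN_AUTO_TEST_CASE macro from
// backendsCommon/test. A minimal sketch (the test names here are illustrative):
//
//     ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampUint8,  QuantizeClampUint8Test)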