ArmNN
 20.08
QuantizeTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

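// Test implementations for the Quantize layer: a simple round-trip case and a
// saturation (clamp) case, executed through a backend's IWorkloadFactory.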
namespace
{

template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    IgnoreUnused(memoryManager);
    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);

    // IWorkloadFactory::CreateTensorHandle is deprecated in this release;
    // suppress the warning for these two calls.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}

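// The expected outputs in the tests below follow ArmNN's affine quantization
// scheme: q = round(x / scale) + offset, clamped to the target type's range.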
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

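    // With scale 0.5 and offset 1: round(1.0 / 0.5) + 1 = 3,
    // round(2.0 / 0.5) + 1 = 5, ... , round(12.0 / 0.5) + 1 = 25.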
    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

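    // With scale 0.0001 and offset 0, ±100.0f maps to ±1,000,000, far outside
    // the representable range of every target type, so the workload is
    // expected to saturate the results at the type's lowest/max values.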
    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

} // anonymous namespace

LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
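
// For context, a minimal sketch of how these entry points are typically wired
// into a backend's test suite. The macro and file path below are assumptions
// based on ArmNN's reference backend conventions, not part of this file:
//
//     // src/backends/reference/test/RefLayerTests.cpp (hypothetical excerpt)
//     ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampUint8,  QuantizeClampUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampInt16,  QuantizeClampInt16Test)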