ArmNN 20.08
PermuteTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ResolveType.hpp>

#include <QuantizeHelper.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

template<typename T>
LayerTestResult<T, 4> SimplePermuteTestImpl(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        armnn::PermuteDescriptor descriptor,
        armnn::TensorInfo inputTensorInfo,
        armnn::TensorInfo outputTensorInfo,
        const std::vector<T>& inputData,
        const std::vector<T>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::PermuteQueueDescriptor data;
    data.m_Parameters = descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
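
// Note on the mapping convention used by the tests below: armnn's
// PermutationVector is source-to-destination, i.e. m_DimMappings[i] names the
// output dimension that input dimension i moves to, so the shapes relate as
// outputShape[m_DimMappings[i]] == inputShape[i]. This is the inverse of the
// NumPy/TensorFlow transpose() convention, which lists the source dimension
// for each output dimension.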

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePermuteTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 2, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 2 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo  = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 2,
            3, 4,
            5, 6,
            7, 8
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 5, 2, 6,
            3, 7, 4, 8
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}
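
// Worked example for SimplePermuteTest above: with m_DimMappings = {0, 3, 1, 2}
// the element at input index (n, a, b, c) lands at output index (n, b, c, a).
// The 1x2x2x2 input holds input[0][a][b][c] = 1 + 4a + 2b + c, so enumerating
// output[0][b][c][a] in row-major order yields { 1, 5, 2, 6, 3, 7, 4, 8 },
// which is exactly the outputExpected vector.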

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet1Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 2, 2, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 2 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo  = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
             1,  2,  3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31,
            2, 12, 22, 32,
            3, 13, 23, 33
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}
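
// PermuteValueSet1Test above uses the {0, 2, 3, 1} mapping, i.e. an
// NHWC -> NCHW style rearrangement: the trailing dimension of size 3 moves to
// position 1, which is why the { 1, 2, 2, 3 } input becomes a { 1, 3, 2, 2 }
// output (outputShape[m_DimMappings[i]] == inputShape[i]).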

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 3, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 3 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo  = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31,
            2, 12, 22, 32,
            3, 13, 23, 33
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
             1,  2,  3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}
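
// PermuteValueSet2Test above applies {0, 3, 1, 2}, the inverse of the mapping
// used in PermuteValueSet1Test; accordingly its input and expected data are the
// same two tensors with their roles swapped, so running the two permutations
// back to back reproduces the original ordering.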

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 2, 3, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 3 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo  = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
             1,  2,  3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33,
            41, 42, 43,
            51, 52, 53
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31, 41, 51,
            2, 12, 22, 32, 42, 52,
            3, 13, 23, 33, 43, 53
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}
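
// Usage sketch (an assumption about the surrounding test harness, not part of
// this header): a backend's layer-test suite would typically instantiate these
// templates once per data type via its registration macro, along the lines of:
//
//     ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32,     SimplePermuteTest<armnn::DataType::Float32>)
//     ARMNN_AUTO_TEST_CASE(PermuteValueSet1QAsymmU8, PermuteValueSet1Test<armnn::DataType::QAsymmU8>)
//
// where the macro supplies the IWorkloadFactory and memory manager for the
// backend under test.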