ArmNN 20.11
InstanceNormalizationTestImpl.cpp
//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "InstanceNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    armnn::InstanceNormalizationQueueDescriptor descriptor,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
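
// --- Illustrative sketch (not part of the original file) --------------------
// For reference, this is roughly what the InstanceNormalization workload under
// test computes on NHWC data: for each batch and channel, normalize over the
// H x W positions using the mean and variance of that slice, then apply
// Gamma (scale) and Beta (offset). The helper below is a minimal,
// self-contained sketch (it relies only on <vector> and <cmath>), shown purely
// to indicate how the expected values in these tests can be reproduced; the
// name ReferenceInstanceNormNhwc is hypothetical and not part of ArmNN.
std::vector<float> ReferenceInstanceNormNhwc(const std::vector<float>& input,
                                             unsigned int n, unsigned int h, unsigned int w, unsigned int c,
                                             float gamma, float beta, float eps)
{
    std::vector<float> output(input.size());
    for (unsigned int batch = 0; batch < n; ++batch)
    {
        for (unsigned int channel = 0; channel < c; ++channel)
        {
            // Mean and variance over the H x W positions of this (batch, channel) slice.
            float sum = 0.f;
            float sumSq = 0.f;
            for (unsigned int y = 0; y < h; ++y)
            {
                for (unsigned int x = 0; x < w; ++x)
                {
                    const float v = input[((batch * h + y) * w + x) * c + channel];
                    sum   += v;
                    sumSq += v * v;
                }
            }
            const float count = static_cast<float>(h * w);
            const float mean  = sum / count;
            const float var   = sumSq / count - mean * mean;

            // Normalize, then scale and offset.
            for (unsigned int y = 0; y < h; ++y)
            {
                for (unsigned int x = 0; x < w; ++x)
                {
                    const unsigned int idx = ((batch * h + y) * w + x) * c + channel;
                    output[idx] = gamma * (input[idx] - mean) / std::sqrt(var + eps) + beta;
                }
            }
        }
    }
    return output;
}
// -----------------------------------------------------------------------------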

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height:    2
    // Width:     2
    // Channels:  2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f,  1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f,  2.f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f,  2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f,  4.f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
       -1.f,  2.f,

        // Batch 1, Height 1, Width 0 x Channel (2)
       -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f,  4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, -1.1470304f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f,  1.6058424f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        0.99995005f, -0.7337929f,
        // Batch 1, Height 0, Width 1 x Channel (2)
       -0.99995005f,  0.52413774f,

        // Batch 1, Height 1, Width 0 x Channel (2)
       -0.99995005f, -1.1531031f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        0.99995005f,  1.3627582f
    };
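
    // Where these expected values come from (Eps = 0.0001, Gamma = 1, Beta = 0, set below):
    // batch 0, channel 1 holds { 1, 2, 2, 4 }: mean = 2.25, variance = 1.1875, so for example
    // (1 - 2.25) / sqrt(1.1875 + 0.0001) ~ -1.1470304 and (4 - 2.25) / sqrt(1.1876) ~ 1.6058424.
    // Batch 0, channel 0 is all zeros and normalizes to all zeros. Batch 1, channel 0 holds
    // { 1, -1, -1, 1 }: mean = 0, variance = 1, so each value maps to +/-1 / sqrt(1.0001) ~ +/-0.99995005.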

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f; // epsilon added to the variance to avoid division by zero
    descriptor.m_Parameters.m_Beta       = 0.0f;    // offset applied to the normalized tensor
    descriptor.m_Parameters.m_Gamma      = 1.0f;    // scale applied to the normalized tensor
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height:    2
    // Width:     2
    // Channels:  2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f,  1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f,  2.f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f,  2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f,  4.f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
       -1.f,  2.f,

        // Batch 1, Height 1, Width 0 x Channel (2)
       -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f,  4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        10.f,      7.7059393f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        10.f,      9.541187f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        10.f,      9.541187f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        10.f,     13.211685f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        11.9999f,  8.532414f,
        // Batch 1, Height 0, Width 1 x Channel (2)
         8.0001f, 11.048275f,

        // Batch 1, Height 1, Width 0 x Channel (2)
         8.0001f,  7.693794f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        11.9999f, 12.725516f
    };
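
    // Same input as InstanceNormTest above, but with Gamma = 2 and Beta = 10 (set below),
    // so every expected value is 10 + 2 * (the normalized value from the first test),
    // e.g. 10 + 2 * (-1.1470304) ~ 7.7059393 and 10 + 2 * 0.99995005 ~ 11.9999.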

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f; // epsilon added to the variance to avoid division by zero
    descriptor.m_Parameters.m_Beta       = 10.0f;   // offset applied to the normalized tensor
    descriptor.m_Parameters.m_Gamma      = 2.0f;    // scale applied to the normalized tensor
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

} // anonymous namespace

LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<float, 4> InstanceNormFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}
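
// --- Illustrative usage (not part of the original file) ---------------------
// A backend's layer-test suite would call these entry points with its own
// factories and then compare result.output against result.outputExpected.
// The object names and constructors below are assumptions for the reference
// backend, shown only as a sketch:
//
//     armnn::RefWorkloadFactory workloadFactory;                          // assumed default-constructible
//     auto memoryManager = std::make_shared<armnn::RefMemoryManager>();   // assumed type
//     armnn::RefTensorHandleFactory tensorHandleFactory(memoryManager);   // assumed constructor
//
//     auto result = InstanceNormFloat32Test(
//         workloadFactory, memoryManager, tensorHandleFactory, armnn::DataLayout::NHWC);
// -----------------------------------------------------------------------------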