ArmNN 20.02
InstanceNormalizationTestImpl.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "InstanceNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

// NOTE: the helper includes below are reconstructed from the utilities used in this file
// (IgnoreUnused, PermuteTensorNhwcToNchw, the tensor-copy and workload test helpers);
// the exact paths in the original listing may differ.
#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    armnn::InstanceNormalizationQueueDescriptor descriptor,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);

    // Quantize the input and expected reference data if T is a quantized type.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    // Run the workload, then read the result back for comparison against outputExpected.
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height: 2
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, 1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, 2.f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, 2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f, 4.f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        -1.f, 2.f,

        // Batch 1, Height 1, Width 0 x Channel (2)
        -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, -1.1470304f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f, 1.6058424f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        0.99995005f, -0.7337929f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        -0.99995005f, 0.52413774f,

        // Batch 1, Height 1, Width 0 x Channel (2)
        -0.99995005f, -1.1531031f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        0.99995005f, 1.3627582f
    };
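    // The expected values above follow from normalizing each (batch, channel) slice over H x W:
    //   y = (x - mean) / sqrt(variance + eps), with eps = 0.0001 and gamma = 1, beta = 0 (set below).
    // Worked example for batch 0, channel 1 (values 1, 2, 2, 4): mean = 2.25, variance = 1.1875,
    //   sqrt(1.1875 + 0.0001) ~= 1.08977, so (1 - 2.25) / 1.08977 ~= -1.1470304,
    //   (2 - 2.25) / 1.08977 ~= -0.22940612 and (4 - 2.25) / 1.08977 ~= 1.6058424.
    // Batch 0, channel 0 is constant (all zeros), so it normalizes to all zeros.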

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    // Eps guards against division by zero; gamma and beta are the post-normalization scale and offset.
    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 0.0f;
    descriptor.m_Parameters.m_Gamma      = 1.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height: 2
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, 1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, 2.f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, 2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f, 4.f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
        -1.f, 2.f,

        // Batch 1, Height 1, Width 0 x Channel (2)
        -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f, 4.f
    };

189 
190  std::vector<float> expectedOutputValues
191  {
192  // Batch 0, Height 0, Width 0 x Channel (2)
193  10.f, 7.7059393f,
194  // Batch 0, Height 0, Width 1 x Channel (2)
195  10.f, 9.541187f,
196 
197  // Batch 0, Height 1, Width 0 x Channel (2)
198  10.f, 9.541187f,
199  // Batch 0, Height 1, Width 1 x Channel (2)
200  10.f, 13.211685f,
201 
202  // Batch 1, Height 0, Width 0 x Channel (2)
203  11.9999f, 8.532414f,
204  // Batch 1, Height 0, Width 1 x Channel (2)
205  8.0001f, 11.048275f,
206 
207  // Batch 1, Height 1, Width 0 x Channel (2)
208  8.0001f, 7.693794f,
209  // Batch 1, Height 1, Width 1 x Channel (2)
210  11.9999f, 12.725516f
211  };
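    // Same input as InstanceNormTest, but the normalized values are scaled and shifted by
    // gamma = 2 and beta = 10 (set below): y = gamma * (x - mean) / sqrt(variance + eps) + beta.
    // For example, batch 0, channel 1, first element: 2 * -1.1470304 + 10 ~= 7.7059393, and
    // batch 1, channel 0, first element: 2 * 0.99995005 + 10 ~= 11.9999.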

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    // Same eps as above, but with a non-trivial scale (gamma = 2) and offset (beta = 10).
    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 10.0f;
    descriptor.m_Parameters.m_Gamma      = 2.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

} // anonymous namespace

LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}
252 
254  armnn::IWorkloadFactory& workloadFactory,
256  armnn::DataLayout dataLayout)
257 {
258  return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
259 }
260 
262  armnn::IWorkloadFactory& workloadFactory,
264  armnn::DataLayout dataLayout)
265 {
266  return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
267 }
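
// Usage sketch (illustrative, not part of this file): a backend's layer-test suite registers these
// helpers with its test harness. Assuming the ARMNN_AUTO_TEST_CASE macro used by ArmNN's
// backendsCommon unit tests, a registration for the reference backend might look like:
//
//   ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW)
//   ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC)
//   ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW)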