ArmNN 20.08
InstanceNormalizationTestImpl.cpp
//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "InstanceNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>   // PermuteTensorNhwcToNchw
#include <backendsCommon/test/TensorCopyUtils.hpp>   // CopyDataToITensorHandle / CopyDataFromITensorHandle
#include <backendsCommon/test/WorkloadTestUtils.hpp> // AddInputToWorkload / AddOutputToWorkload

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    armnn::InstanceNormalizationQueueDescriptor descriptor,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);

    // Quantize the input and the expected reference output into the target type T.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    // Create the workload, run it once, and read the result back from the output handle.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height: 2
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f,  1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f,  2.f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f,  2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f,  4.f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
       -1.f,  2.f,

        // Batch 1, Height 1, Width 0 x Channel (2)
       -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f,  4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f, -1.1470304f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f, -0.22940612f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f,  1.6058424f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        0.99995005f, -0.7337929f,
        // Batch 1, Height 0, Width 1 x Channel (2)
       -0.99995005f,  0.52413774f,

        // Batch 1, Height 1, Width 0 x Channel (2)
       -0.99995005f, -1.1531031f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        0.99995005f,  1.3627582f
    };
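    // The expected values follow the instance-norm definition applied per (batch, channel)
    // over the HxW elements: out = gamma * (x - mean) / sqrt(variance + eps) + beta,
    // here with gamma = 1, beta = 0, eps = 0.0001. For example, Batch 0 / Channel 1 holds
    // { 1, 2, 2, 4 } (mean 2.25, variance 1.1875), so 1.f maps to
    // (1 - 2.25) / sqrt(1.1875 + 0.0001) = -1.1470304f.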

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 0.0f;
    descriptor.m_Parameters.m_Gamma      = 1.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> InstanceNormTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    // BatchSize: 2
    // Height: 2
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };

    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        0.f,  1.f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        0.f,  2.f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        0.f,  2.f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        0.f,  4.f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        1.f, -1.f,
        // Batch 1, Height 0, Width 1 x Channel (2)
       -1.f,  2.f,

        // Batch 1, Height 1, Width 0 x Channel (2)
       -1.f, -2.f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        1.f,  4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width 0 x Channel (2)
        10.f,  7.7059393f,
        // Batch 0, Height 0, Width 1 x Channel (2)
        10.f,  9.541187f,

        // Batch 0, Height 1, Width 0 x Channel (2)
        10.f,  9.541187f,
        // Batch 0, Height 1, Width 1 x Channel (2)
        10.f, 13.211685f,

        // Batch 1, Height 0, Width 0 x Channel (2)
        11.9999f,  8.532414f,
        // Batch 1, Height 0, Width 1 x Channel (2)
         8.0001f, 11.048275f,

        // Batch 1, Height 1, Width 0 x Channel (2)
         8.0001f,  7.693794f,
        // Batch 1, Height 1, Width 1 x Channel (2)
        11.9999f, 12.725516f
    };
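    // Same input as InstanceNormTest, but normalized with gamma = 2 and beta = 10: each
    // expected value is the corresponding InstanceNormTest value scaled by 2 and shifted
    // by 10, e.g. 2 * -1.1470304 + 10 ≈ 7.7059393f.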

    if (dataLayout == armnn::DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
    }

    armnn::InstanceNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.0001f;
    descriptor.m_Parameters.m_Beta       = 10.0f;
    descriptor.m_Parameters.m_Gamma      = 2.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    return InstanceNormTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputTensorInfo,
        outputTensorInfo,
        inputValues,
        expectedOutputValues,
        descriptor);
}

} // anonymous namespace

LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<float, 4> InstanceNormFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}
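
These four functions are building blocks rather than self-contained test cases: each backend's layer-test suite invokes them through its own IWorkloadFactory. A minimal sketch of how such a registration typically looks, assuming the ARMNN_AUTO_TEST_CASE helper used by the backendsCommon unit tests (the test-case names below are illustrative):

// e.g. in a backend test file such as RefLayerTests.cpp
ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw, InstanceNormFloat32Test, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw, InstanceNormFloat16Test, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, armnn::DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, armnn::DataLayout::NHWC)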