ArmNN 21.02 - NormalizationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NormalizationTestImpl.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/LayerSupport.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

LayerTestResult<float,4> SimpleNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

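    // Normalisation parameters. Choosing alpha = beta = kappa = 1 keeps the normalisation
    // equation, output = input / pow(kappa + alpha * accumulatedScale, beta), easy to verify
    // by hand in the expected-output calculations below.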
    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Within:
                {
                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
                    // Therefore, all output values should equal the inputs, but divided by:
                    // pow((kappa + (accumulatedScale * alpha)), beta)
                    // ...where accumulatedScale is the sum of every element squared.
                    float divisor[inputNum];
                    for (int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
                    {
                        float accumulatedScale = input[i][0][0][0] * input[i][0][0][0] +
                                                 input[i][0][0][1] * input[i][0][0][1] +
                                                 input[i][0][1][0] * input[i][0][1][0] +
                                                 input[i][0][1][1] * input[i][0][1][1];
                        divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
                    }
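                    // Worked through by hand: batch #0 gives accumulatedScale = 1 + 4 + 9 + 16 = 30,
                    // so divisor[0] = (1 + 30)^1 = 31; batch #1 gives 25 + 36 + 49 + 64 = 174,
                    // so divisor[1] = 175.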
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
                                                              std::vector<float>({input[0][0][0][0]/divisor[0],
                                                                                  input[0][0][0][1]/divisor[0],
                                                                                  input[0][0][1][0]/divisor[0],
                                                                                  input[0][0][1][1]/divisor[0],
                                                                                  input[1][0][0][0]/divisor[1],
                                                                                  input[1][0][0][1]/divisor[1],
                                                                                  input[1][0][1][0]/divisor[1],
                                                                                  input[1][0][1][1]/divisor[1]}));
                    break;
                }
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
                    // pow((kappa + (accumulatedScale * alpha)), -beta)
                    // ...where accumulatedScale is the sum of the squared inputs from adjacent channels for this
                    // element, and 'adjacent' means within half the normSize of this channel.
                    // The test data has only one channel, so this is simplified below.
                    std::vector<float> outputVector;
                    for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
                    {
                        for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
                        {
                            for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
                            {
                                float accumulatedScale = input[n][0][h][w] * input[n][0][h][w];
                                float scale = powf((kappa + accumulatedScale * alpha), -beta);
                                outputVector.push_back(input[n][0][h][w] * scale);
                            }
                        }
                    }
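                    // With a single channel and unit parameters, the scale for an element x reduces to
                    // 1 / (1 + x*x), so each expected output is x / (1 + x*x).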
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}

LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
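                    // With one channel and alpha = beta = kappa = 1, each expected value is
                    // x / (1 + x*x) for input x: 1/2, 2/5, 3/10, 4/17, 5/26, 6/37, 7/50, 8/65.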
                    std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Cross-map is supported for NHWC layout");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}

LayerTestResult<float,4> CompareNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
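    // Builds the same normalisation workload on both the backend under test and the reference
    // backend, runs them on identical random input, and returns both results so the caller can
    // compare ret.output against ret.outputExpected.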
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);

    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!ret.supported)
    {
        return ret;
    }


    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float,4> SimpleNormalizationAcrossTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(
        workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
        normChannel, normMethod);
}
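
// Example usage (a sketch, not part of this file): a backend's layer-test suite would typically
// register these entry points with its unit-test macros, e.g. in RefLayerTests.cpp. The macro
// name below follows the 21.02 convention for tests that take an ITensorHandleFactory and is an
// assumption here:
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)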