// ArmNN 22.05.01 — NormalizationTestImpl.cpp (recovered from the Doxygen source listing)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include <armnn/Exceptions.hpp>
9 
11 
14 #include <armnn/BackendHelper.hpp>
15 
18 
20 
21 namespace
22 {
23 
24 LayerTestResult<float,4> SimpleNormalizationTestImpl(
25  armnn::IWorkloadFactory& workloadFactory,
27  const armnn::ITensorHandleFactory& tensorHandleFactory,
30 {
31  IgnoreUnused(memoryManager);
32  const unsigned int inputHeight = 2;
33  const unsigned int inputWidth = 2;
34  const unsigned int inputChannels = 1;
35  const unsigned int inputNum = 2;
36 
37  unsigned int outputHeight = inputHeight;
38  unsigned int outputWidth = inputWidth;
39  unsigned int outputChannels = inputChannels;
40  unsigned int outputNum = inputNum;
41 
42  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
43  unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
44 
45  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
46  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
47 
48  std::vector<float> input =
49  {
50  // Batch #0
51  1.0f, 2.0f,
52  3.0f, 4.0f,
53  // Batch #1
54  5.0f, 6.0f,
55  7.0f, 8.0f
56  };
57 
58  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
59  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
60 
61  float alpha = 1.f;
62  float beta = 1.f;
63  float kappa = 1.f;
64  uint32_t normSize = 3;
65 
66  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
67  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
68 
71  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
72  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
73  data.m_Parameters.m_NormChannelType = normChannel;
74  data.m_Parameters.m_NormMethodType = normMethod;
75  data.m_Parameters.m_NormSize = normSize;
76  data.m_Parameters.m_Alpha = alpha;
77  data.m_Parameters.m_Beta = beta;
78  data.m_Parameters.m_K = kappa;
80 
81  armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
83  armnn::WorkloadInfo refInfo = info;
84  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
85 
86  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
87  data,
88  info);
89 
90  inputHandle->Allocate();
91  outputHandle->Allocate();
92 
93  CopyDataToITensorHandle(inputHandle.get(), input.data());
94 
95  ExecuteWorkload(*workload, memoryManager);
96 
97  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
98 
99  switch (normMethod)
100  {
102  {
103  switch (normChannel)
104  {
106  {
107  // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
108  // Therefore, all output values should equal the inputs, but divided by:
109  // pow((kappa + (accumulatedScale * alpha)), beta)
110  // ...where accumulatedScale is the sum of every element squared.
111  float divisor[inputNum];
112 
113  float accumulatedScale1 = 0.0f;
114  for (size_t i = 0; i < input.size()/2; ++i)
115  {
116  accumulatedScale1 += input[i]*input[i];
117  }
118 
119  float accumulatedScale2 = 0.0f;
120  for (size_t i = input.size()/2; i < input.size(); ++i)
121  {
122  accumulatedScale2 += input[i]*input[i];
123  }
124 
125  divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
126  divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
127 
128  std::vector<float> output;
129  unsigned int divisorIndex = 0;
130  for (size_t i = 0; i < input.size(); ++i)
131  {
132  if (i == input.size()/2)
133  {
134  divisorIndex++;
135  }
136  output.emplace_back(input[i]/divisor[divisorIndex]);
137  }
138 
139  expectedOutput = output;
140  break;
141  }
143  {
144  // When normalising across channels, all output values should equal the inputs, but multiplied by:
145  // pow((kappa + (accumulatedScale * alpha)), -beta)
146  // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
147  // ...where adjacent channels means within half the normSize for the channel
148  // The test data has only one channel, so this is simplified below.
149  std::vector<float> outputVector;
150 
151  for (unsigned int i = 0; i < input.size(); ++i)
152  {
153  float accumulatedScale = input[i]*input[i];
154  float scale = powf((kappa + accumulatedScale * alpha), -beta);
155  outputVector.push_back(input[i] * scale);
156  }
157  expectedOutput = outputVector;
158  break;
159  }
160  default:
161  {
162  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
163  "only Across and Within are supported");
164  }
165  }
166  break;
167  }
168  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
169  default:
170  {
171  throw armnn::UnimplementedException("Unsupported normalisation method type, "
172  "only LocalBrightness is supported");
173  }
174  }
175 
176  return LayerTestResult<float, 4>(actualOutput,
177  expectedOutput,
178  outputHandle->GetShape(),
179  outputTensorInfo.GetShape());
180 }
181 
182 LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
183  armnn::IWorkloadFactory& workloadFactory,
185  const armnn::ITensorHandleFactory& tensorHandleFactory,
188 {
189  const unsigned int inputHeight = 2;
190  const unsigned int inputWidth = 2;
191  const unsigned int inputChannels = 1;
192  const unsigned int inputNum = 2;
193 
194  unsigned int outputHeight = inputHeight;
195  unsigned int outputWidth = inputWidth;
196  unsigned int outputChannels = inputChannels;
197  unsigned int outputNum = inputNum;
198 
199  unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
200  unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
201 
202  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
203  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
204 
205  std::vector<float> input =
206  {
207  // Batch #0
208  1.0f, 2.0f,
209  3.0f, 4.0f,
210  // Batch #1
211  5.0f, 6.0f,
212  7.0f, 8.0f
213  };
214 
215  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
216  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
217 
218  float alpha = 1.f;
219  float beta = 1.f;
220  float kappa = 1.f;
221  uint32_t normSize = 3;
222 
223  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
224  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
225 
227  armnn::WorkloadInfo info;
228  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
229  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
230  data.m_Parameters.m_NormChannelType = normChannel;
231  data.m_Parameters.m_NormMethodType = normMethod;
232  data.m_Parameters.m_NormSize = normSize;
233  data.m_Parameters.m_Alpha = alpha;
234  data.m_Parameters.m_Beta = beta;
235  data.m_Parameters.m_K = kappa;
237 
238  armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
240  armnn::WorkloadInfo refInfo = info;
241  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
242 
243  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
244  data,
245  info);
246 
247  inputHandle->Allocate();
248  outputHandle->Allocate();
249 
250  CopyDataToITensorHandle(inputHandle.get(), input.data());
251 
252  ExecuteWorkload(*workload, memoryManager);
253 
254  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
255 
256  switch (normMethod)
257  {
259  {
260  switch (normChannel)
261  {
263  {
264  expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
265  0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
266  break;
267  }
268  default:
269  {
270  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
271  "Only Cross-map is supported for NHWC layout");
272  }
273  }
274  break;
275  }
276  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
277  default:
278  {
279  throw armnn::UnimplementedException("Unsupported normalisation method type, "
280  "only LocalBrightness is supported");
281  }
282  }
283 
284  return LayerTestResult<float, 4>(actualOutput,
285  expectedOutput,
286  outputHandle->GetShape(),
287  outputTensorInfo.GetShape());
288 }
289 
290 LayerTestResult<float,4> CompareNormalizationTestImpl(
291  armnn::IWorkloadFactory& workloadFactory,
293  armnn::IWorkloadFactory& refWorkloadFactory,
294  const armnn::ITensorHandleFactory& tensorHandleFactory,
295  const armnn::ITensorHandleFactory& refTensorHandleFactory,
298 {
299  constexpr unsigned int inputNum = 5;
300  constexpr unsigned int inputChannels = 3;
301  constexpr unsigned int inputHeight = 32;
302  constexpr unsigned int inputWidth = 24;
303 
304  constexpr unsigned int outputNum = inputNum;
305  constexpr unsigned int outputChannels = inputChannels;
306  constexpr unsigned int outputHeight = inputHeight;
307  constexpr unsigned int outputWidth = inputWidth;
308 
309  armnn::TensorInfo inputTensorInfo;
310  armnn::TensorInfo outputTensorInfo;
311 
312  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
313  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
314 
315  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
316  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
317 
318  LayerTestResult<float,4> ret(outputTensorInfo);
319 
320  auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);
321 
322  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
323  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
324 
325  constexpr float alpha = 1.f;
326  constexpr float beta = 1.f;
327  constexpr float kappa = 1.f;
328  constexpr uint32_t normSize = 5;
329 
330  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
331  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
332 
334  armnn::WorkloadInfo info;
335  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
336  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
337  data.m_Parameters.m_NormChannelType = normChannel;
338  data.m_Parameters.m_NormMethodType = normMethod;
339  data.m_Parameters.m_NormSize = normSize;
340  data.m_Parameters.m_Alpha = alpha;
341  data.m_Parameters.m_Beta = beta;
342  data.m_Parameters.m_K = kappa;
343 
344  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
345  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
346 
348  armnn::WorkloadInfo refInfo = info;
349  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
350  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
351 
352  // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
353  armnn::BackendId backend = workloadFactory.GetBackendId();
354  auto handle = armnn::GetILayerSupportByBackendId(backend);
355  ret.m_Supported = handle.IsNormalizationSupported(inputTensorInfo, outputTensorInfo, data.m_Parameters);
356 
357  if (!ret.m_Supported)
358  {
359  return ret;
360  }
361 
362  std::unique_ptr<armnn::IWorkload> workload
363  = workloadFactory.CreateWorkload(armnn::LayerType::Normalization, data, info);
364  std::unique_ptr<armnn::IWorkload> workloadRef
365  = refWorkloadFactory.CreateWorkload(armnn::LayerType::Normalization, refData, refInfo);
366 
367  outputHandleRef->Allocate();
368  inputHandleRef->Allocate();
369 
370  inputHandle->Allocate();
371  outputHandle->Allocate();
372 
373  CopyDataToITensorHandle(inputHandle.get(), input.data());
374  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
375 
376  ExecuteWorkload(*workload, memoryManager);
377 
378  workloadRef->Execute();
379 
380  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
381  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
382  ret.m_ActualData = actualOutput;
383  ret.m_ExpectedData = expectedOutput;
384 
385  return ret;
386 }
387 
388 LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
389  armnn::IWorkloadFactory& workloadFactory,
391  const armnn::ITensorHandleFactory& tensorHandleFactory,
394 {
395  const unsigned int inputHeight = 1;
396  const unsigned int inputWidth = 2;
397  const unsigned int inputChannels = 3;
398  const unsigned int inputNum = 2;
399 
400  unsigned int outputHeight = inputHeight;
401  unsigned int outputWidth = inputWidth;
402  unsigned int outputChannels = inputChannels;
403  unsigned int outputNum = inputNum;
404 
405  unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
406  unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
407 
408  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
409  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
410 
411  std::vector<float> input =
412  {
413  // Batch #0
414  -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
415  // Batch #1
416  -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
417  };
418 
419  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
420  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
421 
422  float alpha = 4.f;
423  float beta = 0.5f;
424  float kappa = 9.f;
425  uint32_t normSize = 5;
426 
427  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
428  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
429 
431  armnn::WorkloadInfo info;
432  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
433  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
434  data.m_Parameters.m_NormChannelType = normChannel;
435  data.m_Parameters.m_NormMethodType = normMethod;
436  data.m_Parameters.m_NormSize = normSize;
437  data.m_Parameters.m_Alpha = alpha;
438  data.m_Parameters.m_Beta = beta;
439  data.m_Parameters.m_K = kappa;
441 
442  armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
444  armnn::WorkloadInfo refInfo = info;
445  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
446 
447  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
448  data,
449  info);
450 
451  inputHandle->Allocate();
452  outputHandle->Allocate();
453 
454  CopyDataToITensorHandle(inputHandle.get(), input.data());
455 
456  ExecuteWorkload(*workload, memoryManager);
457 
458  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
459 
460  switch (normMethod)
461  {
463  {
464  switch (normChannel)
465  {
467  {
468  expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
469  -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
470  break;
471  }
472  default:
473  {
474  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
475  "only Across and Within are supported");
476  }
477  }
478  break;
479  }
480  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
481  default:
482  {
483  throw armnn::UnimplementedException("Unsupported normalisation method type, "
484  "only LocalBrightness is supported");
485  }
486  }
487 
488  return LayerTestResult<float, 4>(actualOutput,
489  expectedOutput,
490  outputHandle->GetShape(),
491  outputTensorInfo.GetShape());
492 }
493 
494 } // anonymous namespace
495 
497  armnn::IWorkloadFactory& workloadFactory,
499  const armnn::ITensorHandleFactory& tensorHandleFactory)
500 {
503  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
504 }
505 
507  armnn::IWorkloadFactory& workloadFactory,
509  const armnn::ITensorHandleFactory& tensorHandleFactory)
510 {
513  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
514 }
515 
517  armnn::IWorkloadFactory& workloadFactory,
519  const armnn::ITensorHandleFactory& tensorHandleFactory)
520 {
523  return SimpleNormalizationNhwcTestImpl(
524  workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
525 }
526 
528  armnn::IWorkloadFactory& workloadFactory,
530  armnn::IWorkloadFactory& refWorkloadFactory,
531  const armnn::ITensorHandleFactory& tensorHandleFactory,
532  const armnn::ITensorHandleFactory& refTensorHandleFactory,
535 {
536  return CompareNormalizationTestImpl(
537  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
538  normChannel, normMethod);
539 }
540 
542  armnn::IWorkloadFactory& workloadFactory,
544  const armnn::ITensorHandleFactory& tensorHandleFactory)
545 {
548  return AcrossChannelNormalizationTestImpl(workloadFactory,
549  memoryManager,
550  tensorHandleFactory,
551  normChannel,
552  normMethod);
553 }
/* Doxygen cross-reference index (generated; kept for reference):
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > CompareNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod)
float m_K
Kappa value used for the across channel normalization equation.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
float m_Alpha
Alpha value for the normalization equation.
NormalizationAlgorithmChannel
Definition: Types.hpp:193
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void IgnoreUnused(Ts &&...)
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
LayerTestResult< float, 4 > SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< T > m_ExpectedData
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > SimpleNormalizationAcrossTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Jarret 2009: Local Contrast Normalization.
LayerTestResult< float, 4 > AcrossChannelNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
Krichevsky 2012: Local Brightness Normalization.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
NormalizationAlgorithmMethod
Definition: Types.hpp:199
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
float m_Beta
Beta value for the normalization equation.
LayerTestResult< float, 4 > SimpleNormalizationWithinTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_NormSize
Depth radius value.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
*/