// ArmNN 21.08 — NormalizationTestImpl.cpp
// (Recovered from the Doxygen source listing; the "Go to the documentation of
// this file" navigation text has been folded into this comment header.)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include <armnn/Exceptions.hpp>
9 #include <armnn/LayerSupport.hpp>
10 
12 
14 
17 
18 #include <test/TensorHelpers.hpp>
19 
20 namespace
21 {
22 
23 LayerTestResult<float,4> SimpleNormalizationTestImpl(
24  armnn::IWorkloadFactory& workloadFactory,
26  const armnn::ITensorHandleFactory& tensorHandleFactory,
29 {
30  IgnoreUnused(memoryManager);
31  const unsigned int inputHeight = 2;
32  const unsigned int inputWidth = 2;
33  const unsigned int inputChannels = 1;
34  const unsigned int inputNum = 2;
35 
36  unsigned int outputHeight = inputHeight;
37  unsigned int outputWidth = inputWidth;
38  unsigned int outputChannels = inputChannels;
39  unsigned int outputNum = inputNum;
40 
41  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
42  unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
43 
44  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
45  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
46 
47  std::vector<float> input =
48  {
49  // Batch #0
50  1.0f, 2.0f,
51  3.0f, 4.0f,
52  // Batch #1
53  5.0f, 6.0f,
54  7.0f, 8.0f
55  };
56 
57  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
58  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
59 
60  float alpha = 1.f;
61  float beta = 1.f;
62  float kappa = 1.f;
63  uint32_t normSize = 3;
64 
65  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
66  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
67 
70  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
71  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
72  data.m_Parameters.m_NormChannelType = normChannel;
73  data.m_Parameters.m_NormMethodType = normMethod;
74  data.m_Parameters.m_NormSize = normSize;
75  data.m_Parameters.m_Alpha = alpha;
76  data.m_Parameters.m_Beta = beta;
77  data.m_Parameters.m_K = kappa;
79 
80  armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
82  armnn::WorkloadInfo refInfo = info;
83  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
84 
85  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
86 
87  inputHandle->Allocate();
88  outputHandle->Allocate();
89 
90  CopyDataToITensorHandle(inputHandle.get(), input.data());
91 
92  ExecuteWorkload(*workload, memoryManager);
93 
94  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
95 
96  switch (normMethod)
97  {
99  {
100  switch (normChannel)
101  {
103  {
104  // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
105  // Therefore, all output values should equal the inputs, but divided by:
106  // pow((kappa + (accumulatedScale * alpha)), beta)
107  // ...where accumulatedScale is the sum of every element squared.
108  float divisor[inputNum];
109 
110  float accumulatedScale1 = 0.0f;
111  for (size_t i = 0; i < input.size()/2; ++i)
112  {
113  accumulatedScale1 += input[i]*input[i];
114  }
115 
116  float accumulatedScale2 = 0.0f;
117  for (size_t i = input.size()/2; i < input.size(); ++i)
118  {
119  accumulatedScale2 += input[i]*input[i];
120  }
121 
122  divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
123  divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
124 
125  std::vector<float> output;
126  unsigned int divisorIndex = 0;
127  for (size_t i = 0; i < input.size(); ++i)
128  {
129  if (i == input.size()/2)
130  {
131  divisorIndex++;
132  }
133  output.emplace_back(input[i]/divisor[divisorIndex]);
134  }
135 
136  expectedOutput = output;
137  break;
138  }
140  {
141  // When normalising across channels, all output values should equal the inputs, but multiplied by:
142  // pow((kappa + (accumulatedScale * alpha)), -beta)
143  // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
144  // ...where adjacent channels means within half the normSize for the channel
145  // The test data has only one channel, so this is simplified below.
146  std::vector<float> outputVector;
147 
148  for (unsigned int i = 0; i < input.size(); ++i)
149  {
150  float accumulatedScale = input[i]*input[i];
151  float scale = powf((kappa + accumulatedScale * alpha), -beta);
152  outputVector.push_back(input[i] * scale);
153  }
154  expectedOutput = outputVector;
155  break;
156  }
157  default:
158  {
159  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
160  "only Across and Within are supported");
161  }
162  }
163  break;
164  }
165  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
166  default:
167  {
168  throw armnn::UnimplementedException("Unsupported normalisation method type, "
169  "only LocalBrightness is supported");
170  }
171  }
172 
173  return LayerTestResult<float, 4>(actualOutput,
174  expectedOutput,
175  outputHandle->GetShape(),
176  outputTensorInfo.GetShape());
177 }
178 
179 LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
180  armnn::IWorkloadFactory& workloadFactory,
182  const armnn::ITensorHandleFactory& tensorHandleFactory,
185 {
186  const unsigned int inputHeight = 2;
187  const unsigned int inputWidth = 2;
188  const unsigned int inputChannels = 1;
189  const unsigned int inputNum = 2;
190 
191  unsigned int outputHeight = inputHeight;
192  unsigned int outputWidth = inputWidth;
193  unsigned int outputChannels = inputChannels;
194  unsigned int outputNum = inputNum;
195 
196  unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
197  unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
198 
199  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
200  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
201 
202  std::vector<float> input =
203  {
204  // Batch #0
205  1.0f, 2.0f,
206  3.0f, 4.0f,
207  // Batch #1
208  5.0f, 6.0f,
209  7.0f, 8.0f
210  };
211 
212  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
213  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
214 
215  float alpha = 1.f;
216  float beta = 1.f;
217  float kappa = 1.f;
218  uint32_t normSize = 3;
219 
220  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
221  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
222 
224  armnn::WorkloadInfo info;
225  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
226  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
227  data.m_Parameters.m_NormChannelType = normChannel;
228  data.m_Parameters.m_NormMethodType = normMethod;
229  data.m_Parameters.m_NormSize = normSize;
230  data.m_Parameters.m_Alpha = alpha;
231  data.m_Parameters.m_Beta = beta;
232  data.m_Parameters.m_K = kappa;
234 
235  armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
237  armnn::WorkloadInfo refInfo = info;
238  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
239 
240  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
241 
242  inputHandle->Allocate();
243  outputHandle->Allocate();
244 
245  CopyDataToITensorHandle(inputHandle.get(), input.data());
246 
247  ExecuteWorkload(*workload, memoryManager);
248 
249  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
250 
251  switch (normMethod)
252  {
254  {
255  switch (normChannel)
256  {
258  {
259  expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
260  0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
261  break;
262  }
263  default:
264  {
265  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
266  "Only Cross-map is supported for NHWC layout");
267  }
268  }
269  break;
270  }
271  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
272  default:
273  {
274  throw armnn::UnimplementedException("Unsupported normalisation method type, "
275  "only LocalBrightness is supported");
276  }
277  }
278 
279  return LayerTestResult<float, 4>(actualOutput,
280  expectedOutput,
281  outputHandle->GetShape(),
282  outputTensorInfo.GetShape());
283 }
284 
285 LayerTestResult<float,4> CompareNormalizationTestImpl(
286  armnn::IWorkloadFactory& workloadFactory,
288  armnn::IWorkloadFactory& refWorkloadFactory,
289  const armnn::ITensorHandleFactory& tensorHandleFactory,
290  const armnn::ITensorHandleFactory& refTensorHandleFactory,
293 {
294  constexpr unsigned int inputNum = 5;
295  constexpr unsigned int inputChannels = 3;
296  constexpr unsigned int inputHeight = 32;
297  constexpr unsigned int inputWidth = 24;
298 
299  constexpr unsigned int outputNum = inputNum;
300  constexpr unsigned int outputChannels = inputChannels;
301  constexpr unsigned int outputHeight = inputHeight;
302  constexpr unsigned int outputWidth = inputWidth;
303 
304  armnn::TensorInfo inputTensorInfo;
305  armnn::TensorInfo outputTensorInfo;
306 
307  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
308  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
309 
310  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
311  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
312 
313  LayerTestResult<float,4> ret(outputTensorInfo);
314 
315  auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);
316 
317  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
318  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
319 
320  constexpr float alpha = 1.f;
321  constexpr float beta = 1.f;
322  constexpr float kappa = 1.f;
323  constexpr uint32_t normSize = 5;
324 
325  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
326  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
327 
329  armnn::WorkloadInfo info;
330  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
331  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
332  data.m_Parameters.m_NormChannelType = normChannel;
333  data.m_Parameters.m_NormMethodType = normMethod;
334  data.m_Parameters.m_NormSize = normSize;
335  data.m_Parameters.m_Alpha = alpha;
336  data.m_Parameters.m_Beta = beta;
337  data.m_Parameters.m_K = kappa;
338 
339  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
340  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
341 
343  armnn::WorkloadInfo refInfo = info;
344  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
345  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
346 
347  // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
348  armnn::BackendId backend = workloadFactory.GetBackendId();
349  const size_t reasonIfUnsupportedMaxLen = 255;
350  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
351  ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
352  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
353  if (!ret.m_Supported)
354  {
355  return ret;
356  }
357 
358  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
359  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
360 
361  outputHandleRef->Allocate();
362  inputHandleRef->Allocate();
363 
364  inputHandle->Allocate();
365  outputHandle->Allocate();
366 
367  CopyDataToITensorHandle(inputHandle.get(), input.data());
368  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
369 
370  ExecuteWorkload(*workload, memoryManager);
371 
372  workloadRef->Execute();
373 
374  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
375  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
376  ret.m_ActualData = actualOutput;
377  ret.m_ExpectedData = expectedOutput;
378 
379  return ret;
380 }
381 
382 LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
383  armnn::IWorkloadFactory& workloadFactory,
385  const armnn::ITensorHandleFactory& tensorHandleFactory,
388 {
389  const unsigned int inputHeight = 1;
390  const unsigned int inputWidth = 2;
391  const unsigned int inputChannels = 3;
392  const unsigned int inputNum = 2;
393 
394  unsigned int outputHeight = inputHeight;
395  unsigned int outputWidth = inputWidth;
396  unsigned int outputChannels = inputChannels;
397  unsigned int outputNum = inputNum;
398 
399  unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
400  unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
401 
402  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
403  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
404 
405  std::vector<float> input =
406  {
407  // Batch #0
408  -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
409  // Batch #1
410  -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
411  };
412 
413  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
414  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
415 
416  float alpha = 4.f;
417  float beta = 0.5f;
418  float kappa = 9.f;
419  uint32_t normSize = 5;
420 
421  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
422  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
423 
425  armnn::WorkloadInfo info;
426  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
427  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
428  data.m_Parameters.m_NormChannelType = normChannel;
429  data.m_Parameters.m_NormMethodType = normMethod;
430  data.m_Parameters.m_NormSize = normSize;
431  data.m_Parameters.m_Alpha = alpha;
432  data.m_Parameters.m_Beta = beta;
433  data.m_Parameters.m_K = kappa;
435 
436  armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
438  armnn::WorkloadInfo refInfo = info;
439  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
440 
441  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
442 
443  inputHandle->Allocate();
444  outputHandle->Allocate();
445 
446  CopyDataToITensorHandle(inputHandle.get(), input.data());
447 
448  ExecuteWorkload(*workload, memoryManager);
449 
450  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
451 
452  switch (normMethod)
453  {
455  {
456  switch (normChannel)
457  {
459  {
460  expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
461  -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
462  break;
463  }
464  default:
465  {
466  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
467  "only Across and Within are supported");
468  }
469  }
470  break;
471  }
472  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
473  default:
474  {
475  throw armnn::UnimplementedException("Unsupported normalisation method type, "
476  "only LocalBrightness is supported");
477  }
478  }
479 
480  return LayerTestResult<float, 4>(actualOutput,
481  expectedOutput,
482  outputHandle->GetShape(),
483  outputTensorInfo.GetShape());
484 }
485 
486 } // anonymous namespace
487 
489  armnn::IWorkloadFactory& workloadFactory,
491  const armnn::ITensorHandleFactory& tensorHandleFactory)
492 {
495  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
496 }
497 
499  armnn::IWorkloadFactory& workloadFactory,
501  const armnn::ITensorHandleFactory& tensorHandleFactory)
502 {
505  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
506 }
507 
509  armnn::IWorkloadFactory& workloadFactory,
511  const armnn::ITensorHandleFactory& tensorHandleFactory)
512 {
515  return SimpleNormalizationNhwcTestImpl(
516  workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
517 }
518 
520  armnn::IWorkloadFactory& workloadFactory,
522  armnn::IWorkloadFactory& refWorkloadFactory,
523  const armnn::ITensorHandleFactory& tensorHandleFactory,
524  const armnn::ITensorHandleFactory& refTensorHandleFactory,
527 {
528  return CompareNormalizationTestImpl(
529  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
530  normChannel, normMethod);
531 }
532 
534  armnn::IWorkloadFactory& workloadFactory,
536  const armnn::ITensorHandleFactory& tensorHandleFactory)
537 {
540  return AcrossChannelNormalizationTestImpl(workloadFactory,
541  memoryManager,
542  tensorHandleFactory,
543  normChannel,
544  normMethod);
545 }
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > CompareNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod)
float m_K
Kappa value used for the across channel normalization equation.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
float m_Alpha
Alpha value for the normalization equation.
NormalizationAlgorithmChannel
Definition: Types.hpp:161
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void IgnoreUnused(Ts &&...)
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
LayerTestResult< float, 4 > SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< T > m_ExpectedData
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > SimpleNormalizationAcrossTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Jarret 2009: Local Contrast Normalization.
LayerTestResult< float, 4 > AcrossChannelNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
Krichevsky 2012: Local Brightness Normalization.
NormalizationAlgorithmMethod
Definition: Types.hpp:167
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
float m_Beta
Beta value for the normalization equation.
LayerTestResult< float, 4 > SimpleNormalizationWithinTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_NormSize
Depth radius value.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)