ArmNN
 20.02
NormalizationTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include <armnn/Exceptions.hpp>
9 #include <armnn/LayerSupport.hpp>
10 
12 
15 
16 #include <test/TensorHelpers.hpp>
17 
18 namespace
19 {
20 
// Runs a normalization workload on a fixed NCHW [N=2, C=1, H=2, W=2] Float32 input
// and analytically computes the expected output on the host for comparison.
// NOTE(review): this view of the file is missing several original lines (the tail of
// the parameter list — presumably memoryManager/normChannel/normMethod — the
// NormalizationQueueDescriptor/WorkloadInfo/refData declarations, and the `case`
// labels inside both switches). All visible code is preserved unchanged; confirm
// against the full source before relying on the missing pieces.
21 LayerTestResult<float,4> SimpleNormalizationTestImpl(
22  armnn::IWorkloadFactory& workloadFactory,
26 {
27  IgnoreUnused(memoryManager);
    // Output shape mirrors the input shape exactly (normalization is shape-preserving).
28  const unsigned int inputHeight = 2;
29  const unsigned int inputWidth = 2;
30  const unsigned int inputChannels = 1;
31  const unsigned int inputNum = 2;
32 
33  unsigned int outputHeight = inputHeight;
34  unsigned int outputWidth = inputWidth;
35  unsigned int outputChannels = inputChannels;
36  unsigned int outputNum = inputNum;
37 
38  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
39  unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
40 
41  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
42  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
43 
44  LayerTestResult<float,4> ret(outputTensorInfo);
45 
    // Deterministic input: values 1..8 split across the two batches.
46  auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
47  // Batch #0
48  1.0f, 2.0f,
49  3.0f, 4.0f,
50  // Batch #1
51  5.0f, 6.0f,
52  7.0f, 8.0f
53  }));
54 
    // alpha/beta/kappa of 1 keep the expected-value arithmetic below simple.
    // normSize 3 means the 3x3 window covers the whole 2x2 plane at every position.
55  float alpha = 1.f;
56  float beta = 1.f;
57  float kappa = 1.f;
58  uint32_t normSize = 3;
59 
60  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
61  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
62 
    // NOTE(review): the declarations of `data` (queue descriptor) and `info`
    // (WorkloadInfo) are not visible in this view — they are populated below.
65  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
66  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
67  data.m_Parameters.m_NormChannelType = normChannel;
68  data.m_Parameters.m_NormMethodType = normMethod;
69  data.m_Parameters.m_NormSize = normSize;
70  data.m_Parameters.m_Alpha = alpha;
71  data.m_Parameters.m_Beta = beta;
72  data.m_Parameters.m_K = kappa;
74 
    // refHandle aliases ret.outputExpected so a reference workload would write the
    // expected tensor in place. NOTE(review): `refData` is declared on a line not
    // visible here.
75  armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
77  armnn::WorkloadInfo refInfo = info;
78  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
79 
80  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
81 
82  inputHandle->Allocate();
83  outputHandle->Allocate();
84 
85  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
86 
87  ExecuteWorkload(*workload, memoryManager);
88 
89  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
90 
    // Compute the expected output analytically for each supported method/channel
    // combination. NOTE(review): the `case` labels are missing from this view;
    // from the default-branch messages, the outer case is LocalBrightness and the
    // inner cases are Within then Across.
91  switch (normMethod)
92  {
94  {
95  switch (normChannel)
96  {
98  {
99  // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
100  // Therefore, all output values should equal the inputs, but divided by:
101  // pow((kappa + (accumulatedScale * alpha)), beta)
102  // ...where accumulatedScale is the sum of every element squared.
103  float divisor[inputNum];
104  for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
105  {
106  float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
107  input[i][0][0][1]*input[i][0][0][1] +
108  input[i][0][1][0]*input[i][0][1][0] +
109  input[i][0][1][1]*input[i][0][1][1];
110  divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
111  }
112  ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
113  std::vector<float>({input[0][0][0][0]/divisor[0],
114  input[0][0][0][1]/divisor[0],
115  input[0][0][1][0]/divisor[0],
116  input[0][0][1][1]/divisor[0],
117  input[1][0][0][0]/divisor[1],
118  input[1][0][0][1]/divisor[1],
119  input[1][0][1][0]/divisor[1],
120  input[1][0][1][1]/divisor[1]}));
121  break;
122  }
124  {
125  // When normalising across channels, all output values should equal the inputs, but multiplied by:
126  // pow((kappa + (accumulatedScale * alpha)), -beta)
127  // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
128  // ...where adjacent channels means within half the normSize for the channel
129  // The test data has only one channel, so this is simplified below.
130  std::vector<float> outputVector;
131  for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
132  {
133  for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
134  {
135  for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
136  {
137  float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
138  float scale = powf((kappa + accumulatedScale * alpha), -beta);
139  outputVector.push_back(input[n][0][h][w] * scale);
140  }
141  }
142  }
143  ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
144  break;
145  }
146  default:
147  {
148  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
149  "only Across and Within are supported");
150  }
151  }
152  break;
153  }
154  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
155  default:
156  {
157  throw armnn::UnimplementedException("Unsupported normalisation method type, "
158  "only LocalBrightness is supported");
159  }
160  }
161 
162  return ret;
163 }
164 
// NHWC variant of the simple normalization test: same [2, 2, 2, 1] data as the
// NCHW test but laid out as [N, H, W, C], with hard-coded expected outputs.
// NOTE(review): this view is missing the tail of the parameter list, the
// queue-descriptor/refData declarations, the m_DataLayout assignment (if any),
// and the `case` labels inside the switches. Visible code is preserved unchanged.
165 LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
166  armnn::IWorkloadFactory& workloadFactory,
170 {
171  const unsigned int inputHeight = 2;
172  const unsigned int inputWidth = 2;
173  const unsigned int inputChannels = 1;
174  const unsigned int inputNum = 2;
175 
176  unsigned int outputHeight = inputHeight;
177  unsigned int outputWidth = inputWidth;
178  unsigned int outputChannels = inputChannels;
179  unsigned int outputNum = inputNum;
180 
    // Shapes are in NHWC order here, unlike the NCHW sibling test above.
181  unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
182  unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
183 
184  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
185  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
186 
187  LayerTestResult<float,4> ret(outputTensorInfo);
188 
    // Same deterministic 1..8 input as the NCHW test.
189  auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
190  // Batch #0
191  1.0f, 2.0f,
192  3.0f, 4.0f,
193  // Batch #1
194  5.0f, 6.0f,
195  7.0f, 8.0f
196  }));
197 
198  float alpha = 1.f;
199  float beta = 1.f;
200  float kappa = 1.f;
201  uint32_t normSize = 3;
202 
203  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
204  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
205 
    // NOTE(review): the declaration of `data` is on a line not visible in this view.
207  armnn::WorkloadInfo info;
208  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
209  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
210  data.m_Parameters.m_NormChannelType = normChannel;
211  data.m_Parameters.m_NormMethodType = normMethod;
212  data.m_Parameters.m_NormSize = normSize;
213  data.m_Parameters.m_Alpha = alpha;
214  data.m_Parameters.m_Beta = beta;
215  data.m_Parameters.m_K = kappa;
217 
    // refHandle aliases ret.outputExpected; `refData` is declared on a missing line.
218  armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
220  armnn::WorkloadInfo refInfo = info;
221  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
222 
223  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
224 
225  inputHandle->Allocate();
226  outputHandle->Allocate();
227 
228  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
229 
230  ExecuteWorkload(*workload, memoryManager);
231 
232  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
233 
    // Expected values are precomputed constants rather than derived from `input`.
    // NOTE(review): `case` labels are missing from this view; the default-branch
    // message indicates the single supported inner case is cross-map (Across).
234  switch (normMethod)
235  {
237  {
238  switch (normChannel)
239  {
241  {
242  std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
243  0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
244  ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
245  break;
246  }
247  default:
248  {
249  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
250  "Only Cross-map is supported for NHWC layout");
251  }
252  }
253  break;
254  }
255  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
256  default:
257  {
258  throw armnn::UnimplementedException("Unsupported normalisation method type, "
259  "only LocalBrightness is supported");
260  }
261  }
262 
263  return ret;
264 }
265 
// Runs the same normalization on two workload factories (the one under test and a
// reference factory) with a random [5, 3, 32, 24] NCHW input, returning both
// outputs so the caller can compare them. Skips execution (ret.supported = false)
// when the backend reports the configuration unsupported.
// NOTE(review): this view is missing the tail of the parameter list and the
// `data`/`refData` queue-descriptor declarations; visible code is unchanged.
266 LayerTestResult<float,4> CompareNormalizationTestImpl(
267  armnn::IWorkloadFactory& workloadFactory,
269  armnn::IWorkloadFactory& refWorkloadFactory,
272 {
273  constexpr unsigned int inputNum = 5;
274  constexpr unsigned int inputChannels = 3;
275  constexpr unsigned int inputHeight = 32;
276  constexpr unsigned int inputWidth = 24;
277 
278  constexpr unsigned int outputNum = inputNum;
279  constexpr unsigned int outputChannels = inputChannels;
280  constexpr unsigned int outputHeight = inputHeight;
281  constexpr unsigned int outputWidth = inputWidth;
282 
283  armnn::TensorInfo inputTensorInfo;
284  armnn::TensorInfo outputTensorInfo;
285 
286  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
287  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
288 
289  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
290  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
291 
292  LayerTestResult<float,4> ret(outputTensorInfo);
293 
    // Fixed seed keeps the "random" input reproducible across runs.
294  auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
295 
296  constexpr float alpha = 1.f;
297  constexpr float beta = 1.f;
298  constexpr float kappa = 1.f;
299  constexpr uint32_t normSize = 5;
300 
301  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
302  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
303 
    // NOTE(review): the declaration of `data` is on a line not visible in this view.
305  armnn::WorkloadInfo info;
306  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
307  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
308  data.m_Parameters.m_NormChannelType = normChannel;
309  data.m_Parameters.m_NormMethodType = normMethod;
310  data.m_Parameters.m_NormSize = normSize;
311  data.m_Parameters.m_Alpha = alpha;
312  data.m_Parameters.m_Beta = beta;
313  data.m_Parameters.m_K = kappa;
314 
315  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
316  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
317 
    // NOTE(review): `refData` is declared on a line not visible in this view.
319  armnn::WorkloadInfo refInfo = info;
320  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
321  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
322 
323  // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
324  armnn::BackendId backend = workloadFactory.GetBackendId();
325  const size_t reasonIfUnsupportedMaxLen = 255;
326  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
327  ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
328  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
329  if (!ret.supported)
330  {
331  return ret;
332  }
333 
334  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
335  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
336 
337  outputHandleRef->Allocate();
338  inputHandleRef->Allocate();
339 
340  inputHandle->Allocate();
341  outputHandle->Allocate();
342 
    // Both workloads get identical input data.
343  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
344  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
345 
346  ExecuteWorkload(*workload, memoryManager);
347 
348  workloadRef->Execute();
349 
    // ret.output = backend under test; ret.outputExpected = reference backend.
350  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
351  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
352 
353  return ret;
354 }
355 
356 } // anonymous namespace
357 
// NOTE(review): the signature/name line and the normChannel/normMethod local
// definitions of this wrapper are missing from this view. It forwards to
// SimpleNormalizationTestImpl — presumably SimpleNormalizationAcrossTest
// (per the Doxygen index); confirm against the full source.
359  armnn::IWorkloadFactory& workloadFactory,
361 {
364  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
365 }
366 
// NOTE(review): the signature/name line and the normChannel/normMethod local
// definitions of this wrapper are missing from this view. It forwards to
// SimpleNormalizationTestImpl — presumably SimpleNormalizationWithinTest
// (per the Doxygen index); confirm against the full source.
368  armnn::IWorkloadFactory& workloadFactory,
370 {
373  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
374 }
375 
// NOTE(review): the signature/name line and the normChannel/normMethod local
// definitions of this wrapper are missing from this view. It forwards to the
// NHWC implementation — presumably SimpleNormalizationAcrossNhwcTest
// (per the Doxygen index); confirm against the full source.
377  armnn::IWorkloadFactory& workloadFactory,
379 {
382  return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
383 }
384 
// NOTE(review): the signature/name line of this wrapper is missing from this
// view. It forwards all arguments to CompareNormalizationTestImpl — presumably
// CompareNormalizationTest, whose full signature appears in the Doxygen index;
// confirm against the full source.
386  armnn::IWorkloadFactory& workloadFactory,
388  armnn::IWorkloadFactory& refWorkloadFactory,
391 {
392  return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
393 }
virtual const BackendId & GetBackendId() const =0
float m_K
Kappa value used for the across channel normalization equation.
float m_Alpha
Alpha value for the normalization equation.
LayerTestResult< float, 4 > SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
NormalizationAlgorithmChannel
Definition: Types.hpp:126
LayerTestResult< float, 4 > SimpleNormalizationAcrossTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void IgnoreUnused(Ts &&...)
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Jarret 2009: Local Contrast Normalization.
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 4 > SimpleNormalizationWithinTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Krichevsky 2012: Local Brightness Normalization.
NormalizationAlgorithmMethod
Definition: Types.hpp:132
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.
LayerTestResult< float, 4 > CompareNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)