ArmNN 20.08 — NormalizationTestImpl.cpp (Doxygen source listing)
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include <armnn/Exceptions.hpp>
9 #include <armnn/LayerSupport.hpp>
10 
12 
15 
16 #include <test/TensorHelpers.hpp>
17 
18 namespace
19 {
20 
// Builds and executes a Normalization workload on a fixed N=2, C=1, H=2, W=2 Float32
// NCHW input, then computes the expected output on the host into ret.outputExpected.
// NOTE(review): this Doxygen listing elides several lines — the remaining parameters
// (presumably memoryManager, normChannel, normMethod), the declarations of the queue
// descriptor `data`, the WorkloadInfo `info`, the reference descriptor `refData`, and
// the `case` labels of both switch statements are not visible here. Confirm against
// the original NormalizationTestImpl.cpp before editing.
21 LayerTestResult<float,4> SimpleNormalizationTestImpl(
22  armnn::IWorkloadFactory& workloadFactory,
26 {
27  IgnoreUnused(memoryManager);
// Output shape mirrors the input shape exactly (normalization is shape-preserving).
28  const unsigned int inputHeight = 2;
29  const unsigned int inputWidth = 2;
30  const unsigned int inputChannels = 1;
31  const unsigned int inputNum = 2;
32 
33  unsigned int outputHeight = inputHeight;
34  unsigned int outputWidth = inputWidth;
35  unsigned int outputChannels = inputChannels;
36  unsigned int outputNum = inputNum;
37 
38  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
39  unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
40 
41  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
42  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
43 
44  LayerTestResult<float,4> ret(outputTensorInfo);
45 
// Deterministic input values 1..8 so the expected divisor/scale below is easy to verify.
46  auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
47  // Batch #0
48  1.0f, 2.0f,
49  3.0f, 4.0f,
50  // Batch #1
51  5.0f, 6.0f,
52  7.0f, 8.0f
53  }));
54 
// Normalization parameters: window of 3 and the alpha/beta/kappa terms of
// pow((kappa + accumulatedScale * alpha), beta) used in the expected-value math below.
55  float alpha = 1.f;
56  float beta = 1.f;
57  float kappa = 1.f;
58  uint32_t normSize = 3;
59 
61  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
62  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
64 
// NOTE(review): elided lines above presumably declare the `data` descriptor and
// `info` WorkloadInfo consumed here — confirm.
67  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
68  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
69  data.m_Parameters.m_NormChannelType = normChannel;
70  data.m_Parameters.m_NormMethodType = normMethod;
71  data.m_Parameters.m_NormSize = normSize;
72  data.m_Parameters.m_Alpha = alpha;
73  data.m_Parameters.m_Beta = beta;
74  data.m_Parameters.m_K = kappa;
75  data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
76 
// The reference handle wraps ret.outputExpected directly, so a reference workload
// would write its result straight into the expected-output buffer.
77  armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
79  armnn::WorkloadInfo refInfo = info;
80  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
81 
82  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
83 
84  inputHandle->Allocate();
85  outputHandle->Allocate();
86 
87  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
88 
89  ExecuteWorkload(*workload, memoryManager);
90 
91  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
92 
// Host-side computation of the expected output. Only LocalBrightness is supported;
// NOTE(review): the `case` labels (LocalBrightness / Within / Across) are elided in
// this listing — only their opening braces are visible.
93  switch (normMethod)
94  {
96  {
97  switch (normChannel)
98  {
100  {
101  // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
102  // Therefore, all output values should equal the inputs, but divided by:
103  // pow((kappa + (accumulatedScale * alpha)), beta)
104  // ...where accumulatedScale is the sum of every element squared.
105  float divisor[inputNum];
106  for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
107  {
// accumulatedScale is computed per batch: sum of squares of all four elements.
108  float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
109  input[i][0][0][1]*input[i][0][0][1] +
110  input[i][0][1][0]*input[i][0][1][0] +
111  input[i][0][1][1]*input[i][0][1][1];
112  divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
113  }
114  ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
115  std::vector<float>({input[0][0][0][0]/divisor[0],
116  input[0][0][0][1]/divisor[0],
117  input[0][0][1][0]/divisor[0],
118  input[0][0][1][1]/divisor[0],
119  input[1][0][0][0]/divisor[1],
120  input[1][0][0][1]/divisor[1],
121  input[1][0][1][0]/divisor[1],
122  input[1][0][1][1]/divisor[1]}));
123  break;
124  }
126  {
127  // When normalising across channels, all output values should equal the inputs, but multiplied by:
128  // pow((kappa + (accumulatedScale * alpha)), -beta)
129  // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
130  // ...where adjacent channels means within half the normSize for the channel
131  // The test data has only one channel, so this is simplified below.
132  std::vector<float> outputVector;
133  for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
134  {
135  for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
136  {
137  for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
138  {
139  float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
140  float scale = powf((kappa + accumulatedScale * alpha), -beta);
141  outputVector.push_back(input[n][0][h][w] * scale);
142  }
143  }
144  }
145  ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
146  break;
147  }
148  default:
149  {
150  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
151  "only Across and Within are supported");
152  }
153  }
154  break;
155  }
156  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
157  default:
158  {
159  throw armnn::UnimplementedException("Unsupported normalisation method type, "
160  "only LocalBrightness is supported");
161  }
162  }
163 
// ret.output holds the workload result; ret.outputExpected holds the host reference.
164  return ret;
165 }
166 
// NHWC counterpart of SimpleNormalizationTestImpl: same N=2, C=1, 2x2 Float32 data,
// but with the tensor shape and m_DataLayout set to NHWC, and the expected output
// hard-coded (cross-channel / LocalBrightness only) rather than computed on the host.
// NOTE(review): as in the block above, this listing elides the remaining parameters
// (presumably memoryManager, normChannel, normMethod), the `data`/`refData`
// descriptor declarations, and the `case` labels of both switches — confirm against
// the original file before editing.
167 LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
168  armnn::IWorkloadFactory& workloadFactory,
172 {
173  const unsigned int inputHeight = 2;
174  const unsigned int inputWidth = 2;
175  const unsigned int inputChannels = 1;
176  const unsigned int inputNum = 2;
177 
178  unsigned int outputHeight = inputHeight;
179  unsigned int outputWidth = inputWidth;
180  unsigned int outputChannels = inputChannels;
181  unsigned int outputNum = inputNum;
182 
// Shapes are N,H,W,C here (NHWC), unlike the N,C,H,W ordering in the NCHW test.
183  unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
184  unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
185 
186  auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
187  auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
188 
189  LayerTestResult<float,4> ret(outputTensorInfo);
190 
191  auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
192  // Batch #0
193  1.0f, 2.0f,
194  3.0f, 4.0f,
195  // Batch #1
196  5.0f, 6.0f,
197  7.0f, 8.0f
198  }));
199 
// Same normalization parameters as the NCHW test: alpha=beta=kappa=1, window of 3.
200  float alpha = 1.f;
201  float beta = 1.f;
202  float kappa = 1.f;
203  uint32_t normSize = 3;
204 
206  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
207  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
209 
// NOTE(review): the elided line above presumably declares the `data` descriptor — confirm.
211  armnn::WorkloadInfo info;
212  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
213  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
214  data.m_Parameters.m_NormChannelType = normChannel;
215  data.m_Parameters.m_NormMethodType = normMethod;
216  data.m_Parameters.m_NormSize = normSize;
217  data.m_Parameters.m_Alpha = alpha;
218  data.m_Parameters.m_Beta = beta;
219  data.m_Parameters.m_K = kappa;
220  data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
221 
// Reference handle wraps ret.outputExpected, as in the NCHW variant.
222  armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
224  armnn::WorkloadInfo refInfo = info;
225  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
226 
227  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
228 
229  inputHandle->Allocate();
230  outputHandle->Allocate();
231 
232  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
233 
234  ExecuteWorkload(*workload, memoryManager);
235 
236  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
237 
// Expected output is precomputed rather than derived on the host; the exception
// messages below establish that only cross-map / LocalBrightness is supported for NHWC.
// NOTE(review): both `case` labels are elided in this listing.
238  switch (normMethod)
239  {
241  {
242  switch (normChannel)
243  {
245  {
246  std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
247  0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
248  ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
249  break;
250  }
251  default:
252  {
253  throw armnn::UnimplementedException("Unsupported normalisation channel type, "
254  "Only Cross-map is supported for NHWC layout");
255  }
256  }
257  break;
258  }
259  case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
260  default:
261  {
262  throw armnn::UnimplementedException("Unsupported normalisation method type, "
263  "only LocalBrightness is supported");
264  }
265  }
266 
267  return ret;
268 }
269 
// Runs the same Normalization workload on two factories (the backend under test and a
// reference backend) over a random 5x3x32x24 NCHW input, returning the test backend's
// result in ret.output and the reference backend's in ret.outputExpected. Skips
// execution (ret.supported = false) when the backend reports the configuration
// unsupported via IsNormalizationSupported.
// NOTE(review): this listing elides lines — presumably the memoryManager, normChannel
// and normMethod parameters and the `data`/`refData` descriptor declarations. Note no
// m_DataLayout is set here (only the NormSize/Alpha/Beta/K parameters are visible).
270 LayerTestResult<float,4> CompareNormalizationTestImpl(
271  armnn::IWorkloadFactory& workloadFactory,
273  armnn::IWorkloadFactory& refWorkloadFactory,
276 {
277  constexpr unsigned int inputNum = 5;
278  constexpr unsigned int inputChannels = 3;
279  constexpr unsigned int inputHeight = 32;
280  constexpr unsigned int inputWidth = 24;
281 
282  constexpr unsigned int outputNum = inputNum;
283  constexpr unsigned int outputChannels = inputChannels;
284  constexpr unsigned int outputHeight = inputHeight;
285  constexpr unsigned int outputWidth = inputWidth;
286 
287  armnn::TensorInfo inputTensorInfo;
288  armnn::TensorInfo outputTensorInfo;
289 
290  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
291  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
292 
293  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
294  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
295 
296  LayerTestResult<float,4> ret(outputTensorInfo);
297 
// Fixed seed keeps the random input (and therefore the comparison) reproducible.
298  auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
299 
// Larger window (5) than the simple tests; alpha/beta/kappa still 1.
300  constexpr float alpha = 1.f;
301  constexpr float beta = 1.f;
302  constexpr float kappa = 1.f;
303  constexpr uint32_t normSize = 5;
304 
306  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
307  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
309 
// NOTE(review): the elided line above presumably declares the `data` descriptor — confirm.
311  armnn::WorkloadInfo info;
312  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
313  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
314  data.m_Parameters.m_NormChannelType = normChannel;
315  data.m_Parameters.m_NormMethodType = normMethod;
316  data.m_Parameters.m_NormSize = normSize;
317  data.m_Parameters.m_Alpha = alpha;
318  data.m_Parameters.m_Beta = beta;
319  data.m_Parameters.m_K = kappa;
320 
// Reference path uses its own handles from refWorkloadFactory but copies of the same
// workload info (refData is declared on an elided line).
322  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
323  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
325 
327  armnn::WorkloadInfo refInfo = info;
328  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
329  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
330 
331  // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
332  armnn::BackendId backend = workloadFactory.GetBackendId();
333  const size_t reasonIfUnsupportedMaxLen = 255;
334  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
335  ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
336  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
337  if (!ret.supported)
338  {
339  return ret;
340  }
341 
342  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
343  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
344 
345  outputHandleRef->Allocate();
346  inputHandleRef->Allocate();
347 
348  inputHandle->Allocate();
349  outputHandle->Allocate();
350 
// Both backends receive identical input data.
351  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
352  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
353 
354  ExecuteWorkload(*workload, memoryManager);
355 
// The reference workload is executed directly, without the memory-manager helper.
356  workloadRef->Execute();
357 
358  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
359  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
360 
361  return ret;
362 }
363 
364 } // anonymous namespace
365 
// NOTE(review): the function-name and remaining-parameter lines are elided in this
// listing. Per the cross-reference index at the bottom of this page, this is
// presumably SimpleNormalizationAcrossTest(workloadFactory, memoryManager), a thin
// wrapper that forwards fixed channel/method arguments (declared on elided lines) to
// SimpleNormalizationTestImpl — confirm against the original file.
367  armnn::IWorkloadFactory& workloadFactory,
369 {
372  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
373 }
374 
// NOTE(review): the function-name and remaining-parameter lines are elided in this
// listing. Per the cross-reference index at the bottom of this page, this is
// presumably SimpleNormalizationWithinTest(workloadFactory, memoryManager), a thin
// wrapper that forwards fixed channel/method arguments (declared on elided lines) to
// SimpleNormalizationTestImpl — confirm against the original file.
376  armnn::IWorkloadFactory& workloadFactory,
378 {
381  return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
382 }
383 
// NOTE(review): the function-name and remaining-parameter lines are elided in this
// listing. Per the cross-reference index at the bottom of this page, this is
// presumably SimpleNormalizationAcrossNhwcTest(workloadFactory, memoryManager); it is
// the only wrapper delegating to the NHWC implementation — confirm against the
// original file.
385  armnn::IWorkloadFactory& workloadFactory,
387 {
390  return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
391 }
392 
// NOTE(review): the function-name line is elided in this listing. Per the
// cross-reference index at the bottom of this page this is CompareNormalizationTest(
// workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod),
// forwarding all arguments unchanged to CompareNormalizationTestImpl.
394  armnn::IWorkloadFactory& workloadFactory,
396  armnn::IWorkloadFactory& refWorkloadFactory,
399 {
400  return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
401 }
virtual const BackendId & GetBackendId() const =0
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< float, 4 > SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
NormalizationAlgorithmChannel
Definition: Types.hpp:133
LayerTestResult< float, 4 > SimpleNormalizationAcrossTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Jarret 2009: Local Contrast Normalization.
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 4 > SimpleNormalizationWithinTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Krichevsky 2012: Local Brightness Normalization.
NormalizationAlgorithmMethod
Definition: Types.hpp:139
LayerTestResult< float, 4 > CompareNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)