32 const unsigned int inputHeight = 2;
33 const unsigned int inputWidth = 2;
34 const unsigned int inputChannels = 1;
35 const unsigned int inputNum = 2;
37 unsigned int outputHeight = inputHeight;
38 unsigned int outputWidth = inputWidth;
39 unsigned int outputChannels = inputChannels;
40 unsigned int outputNum = inputNum;
42 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
43 unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
48 std::vector<float> input =
58 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
59 std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
64 uint32_t normSize = 3;
66 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
67 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
71 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
72 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
84 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
90 inputHandle->Allocate();
91 outputHandle->Allocate();
95 ExecuteWorkload(*workload, memoryManager);
111 float divisor[inputNum];
113 float accumulatedScale1 = 0.0f;
114 for (
size_t i = 0; i < input.size()/2; ++i)
116 accumulatedScale1 += input[i]*input[i];
119 float accumulatedScale2 = 0.0f;
120 for (
size_t i = input.size()/2; i < input.size(); ++i)
122 accumulatedScale2 += input[i]*input[i];
125 divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
126 divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
128 std::vector<float> output;
129 unsigned int divisorIndex = 0;
130 for (
size_t i = 0; i < input.size(); ++i)
132 if (i == input.size()/2)
136 output.emplace_back(input[i]/divisor[divisorIndex]);
139 expectedOutput = output;
149 std::vector<float> outputVector;
151 for (
unsigned int i = 0; i < input.size(); ++i)
153 float accumulatedScale = input[i]*input[i];
154 float scale = powf((kappa + accumulatedScale * alpha), -beta);
155 outputVector.push_back(input[i] * scale);
157 expectedOutput = outputVector;
163 "only Across and Within are supported");
172 "only LocalBrightness is supported");
178 outputHandle->GetShape(),
179 outputTensorInfo.GetShape());
189 const unsigned int inputHeight = 2;
190 const unsigned int inputWidth = 2;
191 const unsigned int inputChannels = 1;
192 const unsigned int inputNum = 2;
194 unsigned int outputHeight = inputHeight;
195 unsigned int outputWidth = inputWidth;
196 unsigned int outputChannels = inputChannels;
197 unsigned int outputNum = inputNum;
199 unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
200 unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
205 std::vector<float> input =
215 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
216 std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
221 uint32_t normSize = 3;
223 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
224 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
228 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
229 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
241 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
247 inputHandle->Allocate();
248 outputHandle->Allocate();
252 ExecuteWorkload(*workload, memoryManager);
264 expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
265 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
271 "Only Cross-map is supported for NHWC layout");
280 "only LocalBrightness is supported");
286 outputHandle->GetShape(),
287 outputTensorInfo.GetShape());
299 constexpr
unsigned int inputNum = 5;
300 constexpr
unsigned int inputChannels = 3;
301 constexpr
unsigned int inputHeight = 32;
302 constexpr
unsigned int inputWidth = 24;
304 constexpr
unsigned int outputNum = inputNum;
305 constexpr
unsigned int outputChannels = inputChannels;
306 constexpr
unsigned int outputHeight = inputHeight;
307 constexpr
unsigned int outputWidth = inputWidth;
312 unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
313 unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
320 auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);
322 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
323 std::vector<float> expectedOutput(outputTensorInfo.
GetNumElements());
325 constexpr
float alpha = 1.f;
326 constexpr
float beta = 1.f;
327 constexpr
float kappa = 1.f;
328 constexpr uint32_t normSize = 5;
330 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
331 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
335 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
336 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
344 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
345 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
349 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
350 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
355 ret.m_Supported = handle.IsNormalizationSupported(inputTensorInfo, outputTensorInfo, data.
m_Parameters);
357 if (!ret.m_Supported)
362 std::unique_ptr<armnn::IWorkload> workload
364 std::unique_ptr<armnn::IWorkload> workloadRef
367 outputHandleRef->Allocate();
368 inputHandleRef->Allocate();
370 inputHandle->Allocate();
371 outputHandle->Allocate();
376 ExecuteWorkload(*workload, memoryManager);
378 workloadRef->Execute();
382 ret.m_ActualData = actualOutput;
395 const unsigned int inputHeight = 1;
396 const unsigned int inputWidth = 2;
397 const unsigned int inputChannels = 3;
398 const unsigned int inputNum = 2;
400 unsigned int outputHeight = inputHeight;
401 unsigned int outputWidth = inputWidth;
402 unsigned int outputChannels = inputChannels;
403 unsigned int outputNum = inputNum;
405 unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
406 unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
411 std::vector<float> input =
414 -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
416 -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
419 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
420 std::vector<float> expectedOutput(outputTensorInfo.
GetNumElements());
425 uint32_t normSize = 5;
427 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
428 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
432 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
433 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
445 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
451 inputHandle->Allocate();
452 outputHandle->Allocate();
456 ExecuteWorkload(*workload, memoryManager);
468 expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
469 -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
475 "only Across and Within are supported");
484 "only LocalBrightness is supported");
490 outputHandle->GetShape(),
503 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
513 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
523 return SimpleNormalizationNhwcTestImpl(
524 workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
536 return CompareNormalizationTestImpl(
537 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
538 normChannel, normMethod);
548 return AcrossChannelNormalizationTestImpl(workloadFactory,
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > CompareNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod)
float m_K
Kappa value used for the across channel normalization equation.
const TensorShape & GetShape() const
float m_Alpha
Alpha value for the normalization equation.
NormalizationAlgorithmChannel
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
LayerTestResult< float, 4 > SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< T > m_ExpectedData
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > SimpleNormalizationAcrossTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Jarrett 2009: Local Contrast Normalization.
LayerTestResult< float, 4 > AcrossChannelNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
Krichevsky 2012: Local Brightness Normalization.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the LayerSupportHandle for a backend.
NormalizationAlgorithmMethod
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
float m_Beta
Beta value for the normalization equation.
LayerTestResult< float, 4 > SimpleNormalizationWithinTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_NormSize
Depth radius value.
unsigned int GetNumElements() const