// Fragment of SimpleNormalizationTestImpl (signature on an elided line):
// builds a small 2x1x2x2 NCHW float tensor, configures a
// NormalizationQueueDescriptor with the channel/method under test, and runs
// the workload. NOTE(review): the extraction dropped interior lines (the
// input data literal, descriptor/result declarations); code kept verbatim.
const unsigned int inputHeight = 2;
const unsigned int inputWidth = 2;
const unsigned int inputChannels = 1;
const unsigned int inputNum = 2;
// Normalization preserves tensor shape, so output dims mirror input dims.
unsigned int outputHeight = inputHeight;
unsigned int outputWidth = inputWidth;
unsigned int outputChannels = inputChannels;
unsigned int outputNum = inputNum;
// NCHW data layout: { batch, channels, height, width }.
unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
// Input payload lives in the initializer list that followed on elided lines.
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
// Size of the normalization window (neighbourhood extent).
uint32_t normSize = 3;
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// Wire tensors into the queue descriptor and set the normalization
// parameters being exercised by this test case.
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
data.m_Parameters.m_NormChannelType = normChannel; // Across or Within (switched on below)
data.m_Parameters.m_NormMethodType = normMethod;   // presumably LocalBrightness — confirm against caller
data.m_Parameters.m_NormSize = normSize;
data.m_Parameters.m_Alpha = alpha;
data.m_Parameters.m_Beta = beta;
data.m_Parameters.m_K = kappa;
// Reference output binding shares the same tensor info.
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
// Allocate backing memory before execution.
inputHandle->Allocate();
outputHandle->Allocate();
ExecuteWorkload(*workload, memoryManager);
// Expected values for NormalizationAlgorithmChannel::Across with a single
// channel: every element of batch i is divided by
//   divisor[i] = (kappa + alpha * sumOfSquares(batch i))^beta.
// With one channel the cross-channel window reduces to the sum of squares of
// that batch's four spatial elements. (inputNum is a compile-time constant,
// so this is a fixed-size array, not a VLA.)
// NOTE(review): the loop braces sat on extraction-elided lines; code verbatim.
float divisor[inputNum];
for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
// Sum of squares over all four spatial positions of batch i, channel 0.
float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
                         input[i][0][0][1]*input[i][0][0][1] +
                         input[i][0][1][0]*input[i][0][1][0] +
                         input[i][0][1][1]*input[i][0][1][1];
divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
// Element-wise division by the per-batch divisor gives the reference tensor,
// listed in NCHW order (batch 0's four elements, then batch 1's).
ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
    std::vector<float>({input[0][0][0][0]/divisor[0],
                        input[0][0][0][1]/divisor[0],
                        input[0][0][1][0]/divisor[0],
                        input[0][0][1][1]/divisor[0],
                        input[1][0][0][0]/divisor[1],
                        input[1][0][0][1]/divisor[1],
                        input[1][0][1][0]/divisor[1],
                        input[1][0][1][1]/divisor[1]}));
// Expected values for NormalizationAlgorithmChannel::Within: the window is
// applied per element, so each value x is scaled by
//   (kappa + alpha * x^2)^-beta
// (only the element itself contributes to the accumulated scale at this
// input size). Output is built in NCHW iteration order: n, then h, then w.
// NOTE(review): loop braces sat on extraction-elided lines; code verbatim.
std::vector<float> outputVector;
for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
// Self-contribution only: the element's own square.
float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
// Note the negative exponent: scale is a multiplier, unlike the Across
// branch above which computes a divisor with +beta.
float scale = powf((kappa + accumulatedScale * alpha), -beta);
outputVector.push_back(input[n][0][h][w] * scale);
ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
// Tail ends of the unsupported-enum failure messages; the surrounding
// throw/assert statements were on extraction-elided lines.
"only Across and Within are supported");
"only LocalBrightness is supported");
// Fragment of SimpleNormalizationNhwcTestImpl (signature on an elided line):
// same 2-batch, 1-channel, 2x2 test as the NCHW variant above, but with
// tensors described in NHWC layout. NOTE(review): interior lines (input data
// literal, descriptor/result declarations) were dropped by the extraction;
// code kept verbatim.
const unsigned int inputHeight = 2;
const unsigned int inputWidth = 2;
const unsigned int inputChannels = 1;
const unsigned int inputNum = 2;
// Output shape mirrors input shape, as normalization is shape-preserving.
unsigned int outputHeight = inputHeight;
unsigned int outputWidth = inputWidth;
unsigned int outputChannels = inputChannels;
unsigned int outputNum = inputNum;
// NHWC data layout: { batch, height, width, channels } — this is the key
// difference from the NCHW test above.
unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
// Input payload followed on elided lines.
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
// Normalization window size.
uint32_t normSize = 3;
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// Bind tensors and fill in the normalization parameters under test.
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
data.m_Parameters.m_NormChannelType = normChannel;
data.m_Parameters.m_NormMethodType = normMethod;
data.m_Parameters.m_NormSize = normSize;
data.m_Parameters.m_Alpha = alpha;
data.m_Parameters.m_Beta = beta;
data.m_Parameters.m_K = kappa;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
inputHandle->Allocate();
outputHandle->Allocate();
ExecuteWorkload(*workload, memoryManager);
// Hard-coded golden values for the NHWC cross-map case (precomputed rather
// than derived in-test like the NCHW variant above).
std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                   0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
// Tail ends of unsupported-enum failure messages (enclosing statements on
// extraction-elided lines): NHWC only supports Across + LocalBrightness here.
"Only Cross-map is supported for NHWC layout");
"only LocalBrightness is supported");
// Fragment of CompareNormalizationTestImpl (signature on an elided line):
// runs the same normalization on the factory under test and on a reference
// factory over a larger random 5x3x32x24 tensor, so results can be compared
// (the comparison itself is on extraction-elided lines). Code kept verbatim.
constexpr unsigned int inputNum = 5;
constexpr unsigned int inputChannels = 3;
constexpr unsigned int inputHeight = 32;
constexpr unsigned int inputWidth = 24;
// Shape-preserving op: output dims mirror input dims.
constexpr unsigned int outputNum = inputNum;
constexpr unsigned int outputChannels = inputChannels;
constexpr unsigned int outputHeight = inputHeight;
constexpr unsigned int outputWidth = inputWidth;
// NCHW data layout: { batch, channels, height, width }.
unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
// Deterministic pseudo-random input (fixed seed keeps the test reproducible).
auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
// Normalization parameters: all 1.0 with a window of 5.
constexpr float alpha = 1.f;
constexpr float beta = 1.f;
constexpr float kappa = 1.f;
constexpr uint32_t normSize = 5;
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// Bind tensors and configure the descriptor for the factory under test.
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
data.m_Parameters.m_NormChannelType = normChannel;
data.m_Parameters.m_NormMethodType = normMethod;
data.m_Parameters.m_NormSize = normSize;
data.m_Parameters.m_Alpha = alpha;
data.m_Parameters.m_Beta = beta;
data.m_Parameters.m_K = kappa;
// Separate handles for the reference factory, bound via the ref descriptor.
std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
// Buffer for the backend's rejection reason (the IsNormalizationSupported
// call itself starts on an extraction-elided line).
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
reasonIfUnsupported, reasonIfUnsupportedMaxLen);
// Create both workloads before allocating, then run the test workload via
// the shared helper and the reference workload directly.
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
inputHandle->Allocate();
outputHandle->Allocate();
ExecuteWorkload(*workload, memoryManager);
// NOTE(review): reference workload bypasses ExecuteWorkload and its memory
// manager handling — presumably intentional for the reference backend; confirm.
workloadRef->Execute();
// Bodies of the four public wrapper tests (signatures on extraction-elided
// lines): each forwards to the shared implementation with the channel/method
// combination set up on the elided lines just above each return.
return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
virtual const BackendId & GetBackendId() const =0
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult< float, 4 > SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
NormalizationAlgorithmChannel
LayerTestResult< float, 4 > SimpleNormalizationAcrossTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Jarret 2009: Local Contrast Normalization.
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 4 > SimpleNormalizationWithinTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Krichevsky 2012: Local Brightness Normalization.
NormalizationAlgorithmMethod
LayerTestResult< float, 4 > CompareNormalizationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)