template<armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    // ... (quantization scale/offset and data layout parameters)
{
    boost::ignore_unused(memoryManager);
    // ... (input/output TensorInfos and a DataLayoutIndexed wrapper)

    // Per-channel parameter tensor: one element per channel of the input.
    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);
    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
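    // Note (sketch of QuantizedVector's mapping, added for exposition): the
    // float fixtures are converted with q = clamp(round(x / qScale) + qOffset)
    // for quantized T, and passed through unchanged for float types.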
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // Batch norm parameters are supplied per channel (two channels here).
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    // ... (BatchNormalizationQueueDescriptor and ScopedCpuTensorHandles for mean/variance/beta/gamma)
    descriptor.m_Mean     = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta     = &betaTensor;
    descriptor.m_Gamma    = &gammaTensor;
    // ... (m_Eps, m_DataLayout and the WorkloadInfo)
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
    // ... (create the workload via workloadFactory.CreateBatchNormalization(descriptor, info))
    inputHandle->Allocate();
    outputHandle->Allocate();
    // ... (copy the input in, execute the workload, copy the result out)
}
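// A minimal reference sketch (added for exposition, not part of ArmNN's test
// code): per channel c, batch normalization maps each element x to
//     gamma[c] * (x - mean[c]) / sqrt(variance[c] + eps) + beta[c],
// where eps (m_Eps in the descriptor) guards against division by zero.
// The helper below is hypothetical.
#include <cmath>

static float BatchNormReference(float x, float mean, float variance,
                                float beta, float gamma, float eps)
{
    return gamma * (x - mean) / std::sqrt(variance + eps) + beta;
}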
template<armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    // ...
{
    boost::ignore_unused(memoryManager);
    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;
    // ... (input/output/parameter TensorInfos)

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({ /* ... */ }, qScale, qOffset));
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    // ... (BatchNormalizationQueueDescriptor data, WorkloadInfo info, parameter handles)
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean     = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta     = &betaTensor;
    data.m_Gamma    = &gammaTensor;
    // ...
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({ /* ... */ }, qScale, qOffset));
    // ... (create and configure the workload)
    inputHandle->Allocate();
    outputHandle->Allocate();
    // ... (copy the input in, execute the workload, copy the result out)
}
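// Layout sketch (added for exposition; helper names are hypothetical): the
// NCHW and NHWC harnesses exercise the same arithmetic, differing only in
// where element (n, c, h, w) of a [N, C, H, W] tensor sits in memory:
static unsigned int FlatIndexNchw(unsigned int n, unsigned int c, unsigned int h, unsigned int w,
                                  unsigned int C, unsigned int H, unsigned int W)
{
    return ((n * C + c) * H + h) * W + w; // channels vary slowest after batch
}

static unsigned int FlatIndexNhwc(unsigned int n, unsigned int c, unsigned int h, unsigned int w,
                                  unsigned int C, unsigned int H, unsigned int W)
{
    return ((n * H + h) * W + w) * C + c; // channels vary fastest
}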
// In BatchNormFloat32Test (NCHW):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
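// Worked example (a sketch, assuming m_Eps == 0.0f): with the shared
// per-channel constants mean = {3, -2}, variance = {4, 9}, beta = {3, 2},
// gamma = {2, 1}, the expected outputs follow directly:
//   channel 0: out = 2 * (x - 3) / sqrt(4) + 3 = x        (identity)
//   channel 1: out = 1 * (x + 2) / sqrt(9) + 2 = x/3 + 8/3
// so channel 0 of expectedOutputValues simply repeats the inputs, and e.g.
// an input of 1 on channel 1 normalizes to 1/3 + 8/3 = 3.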
// In BatchNormFloat32NhwcTest (NHWC):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
// In BatchNormFloat16Test (NCHW):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
// In BatchNormFloat16NhwcTest (NHWC):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
// In BatchNormUint8Test (NCHW):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
// In BatchNormUint8NhwcTest (NHWC):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        // ... (workloadFactory, memoryManager)
        inputOutputShape, inputValues, expectedOutputValues,
        // ... (qScale, qOffset, data layout));
// In BatchNormInt16Test (NCHW):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
// In BatchNormInt16NhwcTest (NHWC):
    std::vector<float> inputValues          { /* ... */ };
    std::vector<float> expectedOutputValues { /* ... */ };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        // ... (workloadFactory, memoryManager, shape, inputValues)
        expectedOutputValues,
        // ... (qScale, qOffset, data layout));
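// How a backend suite typically consumes these helpers (sketch; the macro is
// ArmNN's backend-test registration macro, and the selection shown here is
// illustrative, e.g. from a RefLayerTests-style file):
//
//   ARMNN_AUTO_TEST_CASE(BatchNormFloat32,     BatchNormFloat32Test)
//   ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
//   ARMNN_AUTO_TEST_CASE(BatchNormUint8,       BatchNormUint8Test)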
LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    boost::ignore_unused(memoryManager);
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;
    // ...
    constexpr unsigned int shape[]       = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };
    // ... (TensorInfos created from shape and tensorShape)
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f); // lower bound 0 keeps the variance non-negative
    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    // ... (descriptor data, WorkloadInfo info, parameter handles)
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean     = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta     = &betaTensor;
    data.m_Gamma    = &gammaTensor;
    // ... (descriptor copied for the reference workload: refData, refInfo)
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
    // ... (create one workload from each factory)
    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();
    // ... (copy the same input into both backends' input handles)
    workload->PostAllocationConfigure();
    workload->Execute();

    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    // ... (copy both outputs into ret.output and ret.outputExpected, return ret)
}
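// A minimal sketch (hypothetical helper, not ArmNN API) of the element-wise
// check the calling framework performs between ret.output (backend under
// test) and ret.outputExpected (reference backend):
#include <cmath>
#include <cstddef>
#include <vector>

static bool OutputsMatch(const std::vector<float>& actual,
                         const std::vector<float>& expected,
                         float tolerance = 1e-5f)
{
    if (actual.size() != expected.size())
    {
        return false;
    }
    for (std::size_t i = 0; i < actual.size(); ++i)
    {
        if (std::fabs(actual[i] - expected[i]) > tolerance)
        {
            return false;
        }
    }
    return true;
}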