From 8eb675eb77865b5d2491f5b2d650ce993cab738c Mon Sep 17 00:00:00 2001
From: Matteo Martincigh
Date: Wed, 17 Oct 2018 14:43:29 +0100
Subject: IVGCVSW-2038 + IVGCVSW-2039 + IVGCVSW-2040 Add NHWC support to the
 Float32 and UInt8 BatchNormalization workloads

* Enabled NHWC support in RefBatchNormalizationFloat32Workload
* Added NHWC unit tests for both FP32 and U8
* Refactored the existing unit tests

Change-Id: I6aa18f1dcc0666b80a17a7ed229cf53607bae147
---
 src/backends/test/BatchNormTestImpl.hpp | 97 ++++++++++++++------------------
 1 file changed, 41 insertions(+), 56 deletions(-)

diff --git a/src/backends/test/BatchNormTestImpl.hpp b/src/backends/test/BatchNormTestImpl.hpp
index ab5413d277..4941b00a49 100644
--- a/src/backends/test/BatchNormTestImpl.hpp
+++ b/src/backends/test/BatchNormTestImpl.hpp
@@ -14,23 +14,25 @@
 #include 
-
 template<typename T>
-LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                        float qScale,
-                                        int32_t qOffset)
+LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                        const armnn::TensorShape& inputOutputTensorShape,
+                                        const std::vector<float>& inputValues,
+                                        const std::vector<float>& expectedOutputValues,
+                                        float qScale,
+                                        int32_t qOffset,
+                                        armnn::DataLayout dataLayout)
 {
-    const unsigned int width    = 2;
-    const unsigned int height   = 3;
-    const unsigned int channels = 2;
-    const unsigned int num      = 1;
+    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+
+    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);
 
-    armnn::TensorInfo inputTensorInfo({num, channels, height, width}, armnn::GetDataType<T>());
-    armnn::TensorInfo outputTensorInfo({num, channels, height, width}, armnn::GetDataType<T>());
-    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
+    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
+                                 armnn::GetDataType<T>());
 
     // Set quantization parameters if the requested type is a quantized type.
-    if(armnn::IsQuantizedType<T>())
+    if (armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
         inputTensorInfo.SetQuantizationOffset(qOffset);
@@ -40,73 +42,56 @@ LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
         tensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
-        {
-            1.f, 4.f,
-            4.f, 2.f,
-            1.f, 6.f,
-
-            1.f, 1.f,
-            4.f, 1.f,
-            -2.f, 4.f
-        }));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        QuantizedVector<T>(qScale, qOffset, inputValues));
+
+    // These values are per-channel of the input.
     auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
-    auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
-    auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
-    LayerTestResult<T, 4> ret(outputTensorInfo);
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+
+    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
+                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::BatchNormalizationQueueDescriptor data;
-    armnn::WorkloadInfo info;
 
     armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
     armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
     armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
     armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
 
+    armnn::BatchNormalizationQueueDescriptor descriptor;
+    descriptor.m_Mean     = &meanTensor;
+    descriptor.m_Variance = &varianceTensor;
+    descriptor.m_Beta     = &betaTensor;
+    descriptor.m_Gamma    = &gammaTensor;
+    descriptor.m_Parameters.m_Eps = 0.0f;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+
     AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
     AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
     AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
     AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
 
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    data.m_Mean = &meanTensor;
-    data.m_Variance = &varianceTensor;
-    data.m_Beta = &betaTensor;
-    data.m_Gamma = &gammaTensor;
-    data.m_Parameters.m_Eps = 0.0f;
-
-    // For each channel:
-    // substract mean, divide by standard deviation (with an epsilon to avoid div by 0),
-    // multiply by gamma and add beta
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
-        {
-            1.f, 4.f,
-            4.f, 2.f,
-            1.f, 6.f,
-
-            3.f, 3.f,
-            4.f, 3.f,
-            2.f, 4.f
-        }));
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
 
     workloadFactory.Finalize();
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
-    return ret;
+    return result;
 }
-- 
cgit v1.2.1
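For reference, the expected outputs that callers now pass in via expectedOutputValues follow the per-channel batch normalization formula the old hard-coded test encoded: out = gamma * (x - mean) / sqrt(variance + eps) + beta. Below is a minimal standalone sketch of that reference math, not part of the patch; the function name ReferenceBatchNorm and the NCHW flattening of the values are illustrative assumptions.

// Standalone sketch (plain C++, not ArmNN): per-channel batch normalization
// over an NCHW-flattened value vector, matching the formula above.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> ReferenceBatchNorm(const std::vector<float>& input,    // N*C*H*W values, NCHW order
                                      const std::vector<float>& mean,     // one entry per channel
                                      const std::vector<float>& variance,
                                      const std::vector<float>& beta,
                                      const std::vector<float>& gamma,
                                      std::size_t channels,
                                      std::size_t spatialSize,            // H*W per channel
                                      float eps)
{
    std::vector<float> output(input.size());
    for (std::size_t c = 0; c < channels; ++c)
    {
        // Normalize each value of channel c, then scale by gamma and shift by beta.
        for (std::size_t i = 0; i < spatialSize; ++i)
        {
            const std::size_t idx = c * spatialSize + i;
            output[idx] = gamma[c] * (input[idx] - mean[c]) / std::sqrt(variance[c] + eps)
                        + beta[c];
        }
    }
    return output;
}

With the test's constants (mean {3, -2}, variance {4, 9}, beta {3, 2}, gamma {2, 1}, eps 0), the NCHW input {1, 4, 4, 2, 1, 6,  1, 1, 4, 1, -2, 4} maps to {1, 4, 4, 2, 1, 6,  3, 3, 4, 3, 2, 4}, which is exactly the expected output the removed hard-coded block contained.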