//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>

template<typename T>
LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                        const armnn::TensorShape& inputOutputTensorShape,
                                        const std::vector<float>& inputValues,
                                        const std::vector<float>& expectedOutputValues,
                                        float qScale,
                                        int32_t qOffset,
                                        armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());

    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    // The per-channel parameter tensors (mean, variance, beta, gamma) are 1D with one
    // element per channel of the input.
    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean                    = &meanTensor;
    descriptor.m_Variance                = &varianceTensor;
    descriptor.m_Beta                    = &betaTensor;
    descriptor.m_Gamma                   = &gammaTensor;
    descriptor.m_Parameters.m_Eps        = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    // Create the workload, allocate the input/output handles, run it, and read back the result.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
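
// A minimal usage sketch, not one of the real Arm NN unit tests: the function name,
// tensor shape, and input/expected values below are illustrative assumptions. They are
// chosen so the expected outputs follow from the per-channel parameters hard-coded in
// BatchNormTestImpl (mean {3, -2}, variance {4, 9}, beta {3, 2}, gamma {2, 1}, eps 0):
// out = gamma * (x - mean) / sqrt(variance + eps) + beta, so channel 0 reduces to the
// identity and channel 1 computes (x + 2) / 3 + 2.
inline LayerTestResult<float, 4> ExampleBatchNormFloat32NchwTest(armnn::IWorkloadFactory& workloadFactory)
{
    // NCHW: BatchSize 1, Channels 2, Height 2, Width 2.
    const armnn::TensorShape inputOutputShape{ 1, 2, 2, 2 };

    std::vector<float> inputValues
    {
        // Channel 0
        1.f, 2.f,
        3.f, 4.f,
        // Channel 1
        1.f, 4.f,
        7.f, -2.f
    };

    std::vector<float> expectedOutputValues
    {
        // Channel 0: identity, since gamma / sqrt(variance) == 1 and beta == mean.
        1.f, 2.f,
        3.f, 4.f,
        // Channel 1: (x + 2) / 3 + 2.
        3.f, 4.f,
        5.f, 2.f
    };

    // Float path: quantization scale/offset are ignored for non-quantized types.
    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NCHW);
}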