Diffstat (limited to 'src/backends/test/BatchNormTestImpl.hpp')
-rw-r--r-- | src/backends/test/BatchNormTestImpl.hpp | 187 |
1 files changed, 0 insertions, 187 deletions
diff --git a/src/backends/test/BatchNormTestImpl.hpp b/src/backends/test/BatchNormTestImpl.hpp
deleted file mode 100644
index 166c44435d..0000000000
--- a/src/backends/test/BatchNormTestImpl.hpp
+++ /dev/null
@@ -1,187 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/Tensor.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-#include <backends/CpuTensorHandle.hpp>
-#include <backends/WorkloadFactory.hpp>
-
-#include <backends/test/QuantizeHelper.hpp>
-
-template<typename T>
-LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                        const armnn::TensorShape& inputOutputTensorShape,
-                                        const std::vector<float>& inputValues,
-                                        const std::vector<float>& expectedOutputValues,
-                                        float qScale,
-                                        int32_t qOffset,
-                                        armnn::DataLayout dataLayout)
-{
-    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
-    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
-
-    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);
-
-    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
-                                 armnn::GetDataType<T>());
-
-    // Set quantization parameters if the requested type is a quantized type.
-    if (armnn::IsQuantizedType<T>())
-    {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
-        tensorInfo.SetQuantizationScale(qScale);
-        tensorInfo.SetQuantizationOffset(qOffset);
-    }
-
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
-                                        QuantizedVector<T>(qScale, qOffset, inputValues));
-
-    // These values are per-channel of the input.
-    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
-    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
-    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-
-    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
-                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
-
-    armnn::BatchNormalizationQueueDescriptor descriptor;
-    descriptor.m_Mean                    = &meanTensor;
-    descriptor.m_Variance                = &varianceTensor;
-    descriptor.m_Beta                    = &betaTensor;
-    descriptor.m_Gamma                   = &gammaTensor;
-    descriptor.m_Parameters.m_Eps        = 0.0f;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-
-    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
-    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
-    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
-    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
-
-    workloadFactory.Finalize();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
-    return result;
-}
-
-
-template<typename T>
-LayerTestResult<T,4> BatchNormTestNhwcImpl(armnn::IWorkloadFactory& workloadFactory,
-                                           float qScale,
-                                           int32_t qOffset)
-{
-    const unsigned int width    = 2;
-    const unsigned int height   = 3;
-    const unsigned int channels = 2;
-    const unsigned int num      = 1;
-
-    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
-    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
-    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
-
-    // Set quantization parameters if the requested type is a quantized type.
-    if(armnn::IsQuantizedType<T>())
-    {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
-        tensorInfo.SetQuantizationScale(qScale);
-        tensorInfo.SetQuantizationOffset(qOffset);
-    }
-
-    auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
-        {
-            1.f, 1.f, 4.f, 1.f,
-            4.f, 4.f, 2.f, 1.f,
-            1.f, -2.f, 6.f, 4.f
-        }));
-    // These values are per-channel of the input.
-    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
-    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
-    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
-    LayerTestResult<T,4> ret(outputTensorInfo);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::BatchNormalizationQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
-
-    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
-    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
-    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
-    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
-
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    data.m_Mean = &meanTensor;
-    data.m_Variance = &varianceTensor;
-    data.m_Beta = &betaTensor;
-    data.m_Gamma = &gammaTensor;
-    data.m_Parameters.m_Eps = 0.0f;
-    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
-
-    // For each channel:
-    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
-    // multiply by gamma and add beta.
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
-        {
-            1.f, 3.f, 4.f, 3.f,
-            4.f, 4.f, 2.f, 3.f,
-            1.f, 2.f, 6.f, 4.f
-        }));
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workloadFactory.Finalize();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
-    return ret;
-}
\ No newline at end of file
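
For reference, the NHWC expected values in the deleted test follow directly from the per-channel batch normalization formula, out = gamma * (in - mean) / sqrt(variance + eps) + beta. The following is a minimal standalone sketch (plain C++, not part of the ArmNN sources; the names and layout are illustrative only) that recomputes and checks those values:

```cpp
// Recompute the expected outputs of BatchNormTestNhwcImpl from the
// per-channel batch norm formula: out = gamma * (in - mean) / sqrt(var + eps) + beta.
#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

int main()
{
    const unsigned int channels = 2;

    // Input laid out as NHWC with shape 1x3x2x2, matching the test tensor.
    const std::vector<float> input = {
        1.f, 1.f,  4.f, 1.f,
        4.f, 4.f,  2.f, 1.f,
        1.f, -2.f, 6.f, 4.f
    };

    // Per-channel parameters, matching the test.
    const float mean[]     = { 3.f, -2.f };
    const float variance[] = { 4.f,  9.f };
    const float beta[]     = { 3.f,  2.f };
    const float gamma[]    = { 2.f,  1.f };
    const float eps        = 0.0f;

    // Expected outputs from the deleted test.
    const std::vector<float> expected = {
        1.f, 3.f, 4.f, 3.f,
        4.f, 4.f, 2.f, 3.f,
        1.f, 2.f, 6.f, 4.f
    };

    for (std::size_t i = 0; i < input.size(); ++i)
    {
        // In NHWC the channel is the fastest-varying dimension, so it is i % channels.
        const std::size_t c = i % channels;
        const float out = gamma[c] * (input[i] - mean[c]) / std::sqrt(variance[c] + eps) + beta[c];
        assert(std::fabs(out - expected[i]) < 1e-6f);
    }
    return 0;
}
```

With eps set to 0, the per-channel standard deviations are sqrt(4) = 2 and sqrt(9) = 3, e.g. for the first element of channel 0: 2 * (1 - 3) / 2 + 3 = 1, which matches the first expected value.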