| author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2018-11-01 16:15:57 +0000 |
|---|---|---|
| committer | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2018-11-02 14:49:21 +0000 |
| commit | c9cc80455ff29fd2c8622c9487ec9c57ade6ea30 (patch) | |
| tree | 41b1491312fe6082b39d5d37ffa0dcf0ab0f2817 /src/backends/backendsCommon/test/BatchNormTestImpl.hpp | |
| parent | 207ef9a6b8b3ea0afe9a095639f67b5dedd095d7 (diff) | |
| download | armnn-c9cc80455ff29fd2c8622c9487ec9c57ade6ea30.tar.gz | |
IVGCVSW-1946: Remove armnn/src from the include paths
Change-Id: I663a0a0fccb43ee960ec070121a59df9db0bb04e
Diffstat (limited to 'src/backends/backendsCommon/test/BatchNormTestImpl.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/BatchNormTestImpl.hpp | 186
1 file changed, 186 insertions, 0 deletions
```diff
diff --git a/src/backends/backendsCommon/test/BatchNormTestImpl.hpp b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
new file mode 100644
index 0000000000..2360fd56e2
--- /dev/null
+++ b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
@@ -0,0 +1,186 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/test/QuantizeHelper.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<typename T>
+LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                        const armnn::TensorShape& inputOutputTensorShape,
+                                        const std::vector<float>& inputValues,
+                                        const std::vector<float>& expectedOutputValues,
+                                        float qScale,
+                                        int32_t qOffset,
+                                        armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+
+    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);
+
+    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
+                                 armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        tensorInfo.SetQuantizationScale(qScale);
+        tensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        QuantizedVector<T>(qScale, qOffset, inputValues));
+
+    // These values are per-channel of the input.
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+
+    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
+                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+
+    armnn::BatchNormalizationQueueDescriptor descriptor;
+    descriptor.m_Mean                    = &meanTensor;
+    descriptor.m_Variance                = &varianceTensor;
+    descriptor.m_Beta                    = &betaTensor;
+    descriptor.m_Gamma                   = &gammaTensor;
+    descriptor.m_Parameters.m_Eps        = 0.0f;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+
+    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
+    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+
+template<typename T>
+LayerTestResult<T, 4> BatchNormTestNhwcImpl(armnn::IWorkloadFactory& workloadFactory,
+                                            float qScale,
+                                            int32_t qOffset)
+{
+    const unsigned int width    = 2;
+    const unsigned int height   = 3;
+    const unsigned int channels = 2;
+    const unsigned int num      = 1;
+
+    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
+    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        tensorInfo.SetQuantizationScale(qScale);
+        tensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset,
+        {
+            1.f, 1.f, 4.f, 1.f,
+            4.f, 4.f, 2.f, 1.f,
+            1.f, -2.f, 6.f, 4.f
+        }));
+
+    // These values are per-channel of the input.
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::BatchNormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+
+    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
+    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Mean                    = &meanTensor;
+    data.m_Variance                = &varianceTensor;
+    data.m_Beta                    = &betaTensor;
+    data.m_Gamma                   = &gammaTensor;
+    data.m_Parameters.m_Eps        = 0.0f;
+    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // For each channel:
+    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
+    // multiply by gamma and add beta.
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset,
+        {
+            1.f, 3.f, 4.f, 3.f,
+            4.f, 4.f, 2.f, 3.f,
+            1.f, 2.f, 6.f, 4.f
+        }));
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
\ No newline at end of file
```
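The expected NHWC outputs in the diff can be reproduced by hand from the per-channel equation the code comment describes: y = gamma * (x - mean) / sqrt(variance + eps) + beta. The following standalone sketch is not part of the patch and is purely illustrative; it recomputes the reference values from the same per-channel parameters and NHWC input used by BatchNormTestNhwcImpl:

```cpp
// Minimal reference computation for per-channel batch normalization:
//   y = gamma * (x - mean) / sqrt(variance + eps) + beta
// Parameters and input copied from the NHWC test above (eps = 0).
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    const float mean[]     = { 3.f, -2.f };
    const float variance[] = { 4.f,  9.f };
    const float beta[]     = { 3.f,  2.f };
    const float gamma[]    = { 2.f,  1.f };
    const float eps        = 0.0f;

    // NHWC input with N=1, H=3, W=2, C=2; the channel index is innermost.
    const std::vector<float> input = { 1.f,  1.f, 4.f, 1.f,
                                       4.f,  4.f, 2.f, 1.f,
                                       1.f, -2.f, 6.f, 4.f };

    for (std::size_t i = 0; i < input.size(); ++i)
    {
        const std::size_t c = i % 2; // channel of this element in NHWC order
        const float y = gamma[c] * (input[i] - mean[c]) / std::sqrt(variance[c] + eps)
                      + beta[c];
        std::printf("%g ", y);
    }
    std::printf("\n"); // prints: 1 3 4 3 4 4 2 3 1 2 6 4
    return 0;
}
```

With these parameters, channel 0 reduces to the identity mapping, since gamma / sqrt(variance) = 2 / 2 = 1 and beta = mean = 3 cancels the shift, which is why the even-indexed expected values equal the corresponding inputs; only channel 1 (mapped by y = (x + 2) / 3 + 2) changes.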