Diffstat (limited to 'src/backends/test/BatchNormTestImpl.hpp')
-rw-r--r--  src/backends/test/BatchNormTestImpl.hpp | 97
1 file changed, 41 insertions(+), 56 deletions(-)
diff --git a/src/backends/test/BatchNormTestImpl.hpp b/src/backends/test/BatchNormTestImpl.hpp
index ab5413d277..4941b00a49 100644
--- a/src/backends/test/BatchNormTestImpl.hpp
+++ b/src/backends/test/BatchNormTestImpl.hpp
@@ -14,23 +14,25 @@
#include <backends/test/QuantizeHelper.hpp>
-
template<typename T>
-LayerTestResult<T,4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
- float qScale,
- int32_t qOffset)
+LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputOutputTensorShape,
+ const std::vector<float>& inputValues,
+ const std::vector<float>& expectedOutputValues,
+ float qScale,
+ int32_t qOffset,
+ armnn::DataLayout dataLayout)
{
- const unsigned int width = 2;
- const unsigned int height = 3;
- const unsigned int channels = 2;
- const unsigned int num = 1;
+ armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+
+ armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);
- armnn::TensorInfo inputTensorInfo({num, channels, height, width}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({num, channels, height, width}, armnn::GetDataType<T>());
- armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
+ armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
+ armnn::GetDataType<T>());
// Set quantization parameters if the requested type is a quantized type.
- if(armnn::IsQuantizedType<T>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
inputTensorInfo.SetQuantizationOffset(qOffset);
@@ -40,73 +42,56 @@ LayerTestResult<T,4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
tensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>(qScale, qOffset,
- {
- 1.f, 4.f,
- 4.f, 2.f,
- 1.f, 6.f,
-
- 1.f, 1.f,
- 4.f, 1.f,
- -2.f, 4.f
- }));
+ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+ QuantizedVector<T>(qScale, qOffset, inputValues));
+
// These values are per-channel of the input.
auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
- auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
- auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
- auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
- LayerTestResult<T,4> ret(outputTensorInfo);
+ auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
+ auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
+ auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+
+ result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
+ QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::BatchNormalizationQueueDescriptor data;
- armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::BatchNormalizationQueueDescriptor descriptor;
+ descriptor.m_Mean = &meanTensor;
+ descriptor.m_Variance = &varianceTensor;
+ descriptor.m_Beta = &betaTensor;
+ descriptor.m_Gamma = &gammaTensor;
+ descriptor.m_Parameters.m_Eps = 0.0f;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+ armnn::WorkloadInfo info;
+
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- data.m_Mean = &meanTensor;
- data.m_Variance = &varianceTensor;
- data.m_Beta = &betaTensor;
- data.m_Gamma = &gammaTensor;
- data.m_Parameters.m_Eps = 0.0f;
-
- // For each channel:
- // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
- // multiply by gamma and add beta
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>(qScale, qOffset,
- {
- 1.f, 4.f,
- 4.f, 2.f,
- 1.f, 6.f,
-
- 3.f, 3.f,
- 4.f, 3.f,
- 2.f, 4.f
- }));
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
workloadFactory.Finalize();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return ret;
+ return result;
}
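
For reference, a minimal sketch of how a caller might drive the refactored helper. The wrapper name BatchNormNchwTestCommon is an assumption for illustration, not part of this change; the shape (N=1, C=2, H=3, W=2) and the input/expected values are exactly the ones the removed hard-coded tensors contained.

template<typename T>
LayerTestResult<T, 4> BatchNormNchwTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                              float qScale,
                                              int32_t qOffset)
{
    // Shape previously implied by the removed constants: num=1, channels=2, height=3, width=2.
    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };

    // Input values previously hard-coded inside BatchNormTestImpl (NCHW order).
    const std::vector<float> inputValues
    {
        // Channel 0, height (3) x width (2)
         1.f, 4.f,
         4.f, 2.f,
         1.f, 6.f,

        // Channel 1, height (3) x width (2)
         1.f, 1.f,
         4.f, 1.f,
        -2.f, 4.f
    };

    // Per channel: subtract mean, divide by sqrt(variance + eps),
    // multiply by gamma and add beta. With mean {3, -2}, variance {4, 9},
    // beta {3, 2}, gamma {2, 1} and eps = 0 this yields:
    const std::vector<float> expectedOutputValues
    {
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<T>(workloadFactory, inputOutputShape,
                                inputValues, expectedOutputValues,
                                qScale, qOffset, armnn::DataLayout::NCHW);
}

Spot check of the first element, channel 0: gamma * (x - mean) / sqrt(variance + eps) + beta = 2 * (1 - 3) / sqrt(4) + 3 = 1, matching the first expected value. An NHWC variant of such a wrapper would presumably pass shape { 1, 3, 2, 2 } with the same per-channel values interleaved along the last dimension, and armnn::DataLayout::NHWC as the final argument.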