aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
diff options
context:
space:
mode:
authorKeith Davis <keith.davis@arm.com>2020-08-27 15:38:12 +0100
committerKeithARM <keith.davis@arm.com>2020-08-31 16:05:39 +0000
commit33a626f14c8db13967ba2a933388223d5bf733c3 (patch)
tree109319777c0f638be639d7d723a417abe97d843c /src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
parent826a543547f325bfac7da507d19a55af9c138a50 (diff)
downloadarmnn-33a626f14c8db13967ba2a933388223d5bf733c3.tar.gz
IVGCVSW-5231 Remove CreateTensorHandle in the test where there is NO_DEPRECATE_WARN
* Done for all elementwise layers, Activation, BatchNorm, BatchToSpace. Signed-off-by: Keith Davis <keith.davis@arm.com> Change-Id: Id1d15a0960233026aecf7a07e0d3f006e07e4abf
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp')
-rw-r--r--src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp61
1 file changed, 38 insertions, 23 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index 44cfc69c24..eb4f461eb9 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -14,6 +14,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
+#include <reference/test/RefWorkloadFactoryHelper.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -29,6 +30,7 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::TensorShape& inputOutputTensorShape,
const std::vector<float>& inputValues,
const std::vector<float>& expectedOutputValues,
@@ -69,10 +71,8 @@ LayerTestResult<T, 4> BatchNormTestImpl(
result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
@@ -114,6 +114,7 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> BatchNormTestNhwcImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -154,10 +155,8 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
LayerTestResult<T,4> ret(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
@@ -210,7 +209,8 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
LayerTestResult<float, 4> BatchNormFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Channels: 2
@@ -246,6 +246,7 @@ LayerTestResult<float, 4> BatchNormFloat32Test(
return BatchNormTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -256,7 +257,8 @@ LayerTestResult<float, 4> BatchNormFloat32Test(
LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Height: 3
@@ -296,6 +298,7 @@ LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
return BatchNormTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -306,7 +309,8 @@ LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Channels: 2
@@ -342,6 +346,7 @@ LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
return BatchNormTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -352,7 +357,8 @@ LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Height: 3
@@ -392,6 +398,7 @@ LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
return BatchNormTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -402,7 +409,8 @@ LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Channels: 2
@@ -438,6 +446,7 @@ LayerTestResult<uint8_t, 4> BatchNormUint8Test(
return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -448,7 +457,8 @@ LayerTestResult<uint8_t, 4> BatchNormUint8Test(
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Height: 3
@@ -488,13 +498,15 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape, inputValues, expectedOutputValues,
1.f/20.f, 50, armnn::DataLayout::NHWC);
}
LayerTestResult<int16_t, 4> BatchNormInt16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Channels: 2
@@ -530,6 +542,7 @@ LayerTestResult<int16_t, 4> BatchNormInt16Test(
return BatchNormTestImpl<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -540,7 +553,8 @@ LayerTestResult<int16_t, 4> BatchNormInt16Test(
LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
// BatchSize: 1
// Height: 3
@@ -580,6 +594,7 @@ LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
return BatchNormTestImpl<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputOutputShape,
inputValues,
expectedOutputValues,
@@ -591,7 +606,9 @@ LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
LayerTestResult<float,4> CompareBatchNormTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::IWorkloadFactory& refWorkloadFactory)
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
IgnoreUnused(memoryManager);
const unsigned int width = 2;
@@ -619,13 +636,11 @@ LayerTestResult<float,4> CompareBatchNormTest(
LayerTestResult<float,4> ret(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;