From 33a626f14c8db13967ba2a933388223d5bf733c3 Mon Sep 17 00:00:00 2001
From: Keith Davis
Date: Thu, 27 Aug 2020 15:38:12 +0100
Subject: IVGCVSW-5231 Remove CreateTensorHandle in the test where there is NO_DEPRECATE_WARN

* Done for all elementwise layers, Activation, BatchNorm, BatchToSpace

Signed-off-by: Keith Davis
Change-Id: Id1d15a0960233026aecf7a07e0d3f006e07e4abf
---
 .../test/layerTests/AdditionTestImpl.cpp | 121 ++++++++++++---------
 1 file changed, 68 insertions(+), 53 deletions(-)

diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index c0a2ca8faf..0e1b7336de 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -8,6 +8,7 @@
 #include "ElementwiseTestImpl.hpp"
 
 #include
+#include
 
 template<>
 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
@@ -20,7 +21,8 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor
 
 LayerTestResult<float, 4> AdditionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     unsigned int batchSize = 2u;
     unsigned int channels = 2u;
@@ -83,12 +85,14 @@ LayerTestResult<float, 4> AdditionTest(
         shape,
         input2,
         shape,
-        output);
+        output,
+        tensorHandleFactory);
 }
 
 LayerTestResult<float, 5> Addition5dTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     unsigned int depth = 2u;
     unsigned int batchSize = 2u;
@@ -155,7 +159,8 @@ LayerTestResult<float, 5> Addition5dTest(
         shape,
         input2,
         shape,
-        output);
+        output,
+        tensorHandleFactory);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -163,7 +168,8 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
-    int32_t qOffset)
+    int32_t qOffset,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
@@ -214,11 +220,9 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
         },
         qScale, qOffset));
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::AdditionQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -248,7 +252,8 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
-    int32_t qOffset)
+    int32_t qOffset,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     IgnoreUnused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -294,11 +299,9 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
         },
         qScale, qOffset));
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::AdditionQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -325,71 +328,80 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
 
 LayerTestResult<float, 4> AdditionBroadcastTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
+        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
-        workloadFactory, memoryManager, 2.f, 0);
+        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
-        workloadFactory, memoryManager, 2.f, 0);
+        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
-        workloadFactory, memoryManager, 1.f, 0);
+        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
+        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
-        workloadFactory, memoryManager, 0.1333333f, 128);
+        workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
 }
 
 LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
-        workloadFactory, memoryManager, 0.1333333f, 0);
+        workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
-        workloadFactory, memoryManager, 1.f, 0);
+        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 4> AdditionUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const unsigned int shape0[] = { 1, 2, 2, 3 };
     const unsigned int shape1[] = { 1, 2, 2, 3 };
@@ -425,13 +437,15 @@ LayerTestResult<uint8_t, 4> AdditionUint8Test(
         3,
         shape0,
         output,
+        tensorHandleFactory,
         7.0f,
         3);
 }
 
 LayerTestResult<int16_t, 4> AdditionInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const unsigned int shape0[] = { 1, 2, 2, 3 };
     const unsigned int shape1[] = { 1, 2, 2, 3 };
@@ -467,13 +481,15 @@ LayerTestResult<int16_t, 4> AdditionInt16Test(
         0,
         shape0,
         output,
+        tensorHandleFactory,
         7.0f,
         0);
 }
 
 LayerTestResult<int32_t, 4> AdditionInt32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const unsigned int shape0[] = { 1, 2, 2, 3 };
     const unsigned int shape1[] = { 1, 2, 2, 3 };
@@ -509,13 +525,15 @@ LayerTestResult<int32_t, 4> AdditionInt32Test(
         0,
         shape0,
         output,
+        tensorHandleFactory,
         1.0f,
         0);
 }
 
 LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     IgnoreUnused(memoryManager);
 
@@ -532,12 +550,10 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
         4, 5, 6,
         7, 8, 9
     });
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
-        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
+        tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
-        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+        tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);
 
     // Apply MaxPool poolSize = 1x1, stride=2x2
     // Result =
@@ -587,10 +603,9 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
         31, 37
     }));
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> addOutputHandle =
+        tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);
 
     armnn::AdditionQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -626,7 +641,9 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
 LayerTestResult<float, 4> CompareAdditionTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::IWorkloadFactory& refWorkloadFactory)
+    armnn::IWorkloadFactory& refWorkloadFactory,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::ITensorHandleFactory& refTensorHandleFactory)
 {
     IgnoreUnused(memoryManager);
     unsigned int batchSize = 4;
@@ -648,15 +665,13 @@ LayerTestResult<float, 4> CompareAdditionTest(
 
     LayerTestResult<float, 4> ret(outputTensorInfo);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::AdditionQueueDescriptor data;
     armnn::WorkloadInfo info;
-- 
cgit v1.2.1
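The change applied by this patch is the same in every test above: the function gains an armnn::ITensorHandleFactory& parameter and creates its tensor handles through that factory, so the deprecated IWorkloadFactory::CreateTensorHandle call and the ARMNN_NO_DEPRECATE_WARN_BEGIN/END guards around it can be dropped. A minimal before/after sketch of that pattern, using only identifiers that appear in the diff (this is an illustrative fragment, not a complete test):

    // Before: the handle came from the workload factory, whose
    // CreateTensorHandle overload is deprecated, so the warning was suppressed.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    ARMNN_NO_DEPRECATE_WARN_END

    // After: the caller passes in an ITensorHandleFactory and the test asks it
    // for the handle directly; no deprecated call, no warning guards needed.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);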