diff options
Diffstat (limited to 'src')
-rw-r--r-- | src/armnn/test/CreateWorkload.hpp | 58 |
-rw-r--r-- | src/backends/cl/test/ClCreateWorkloadTests.cpp | 5 |
-rw-r--r-- | src/backends/neon/test/NeonCreateWorkloadTests.cpp | 5 |
3 files changed, 62 insertions, 6 deletions
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 3f3cdc3986..60beb51c32 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -279,6 +279,64 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
     return workload;
 }
 
+template <typename Convolution2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory,
+                                                                               armnn::Graph& graph,
+                                                                               DataLayout dataLayout = DataLayout::NCHW,
+                                                                               const ModelOptions& modelOptions = {})
+{
+    // Creates the layer we're testing.
+    Convolution2dDescriptor layerDesc;
+    layerDesc.m_PadLeft = 0;
+    layerDesc.m_PadRight = 0;
+    layerDesc.m_PadTop = 0;
+    layerDesc.m_PadBottom = 0;
+    layerDesc.m_StrideX = 1;
+    layerDesc.m_StrideY = 1;
+    layerDesc.m_BiasEnabled = false;
+    layerDesc.m_DataLayout = dataLayout;
+
+    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+    TensorShape weightShape = TensorShape{32, 32, 3, 3};
+    TensorShape inputShape = TensorShape{1, 32, 149, 149};
+    TensorShape outputShape = TensorShape{1, 32, 147, 147};
+
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
+    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+
+    layer->m_Weight->Allocate();
+    layer->m_Bias->Allocate();
+
+    // Creates extra layers.
+    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // Connects up.
+    Connect(input, layer, TensorInfo(inputShape, DataType));
+    Connect(layer, output, TensorInfo(outputShape, DataType));
+    CreateTensorHandles(graph, factory);
+
+    // Makes the workload and checks it.
+    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
+
+    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
+    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
+    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 0);
+    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 0);
+    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 0);
+    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 0);
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+
+    // Returns so we can do extra, backend-specific tests.
+    return workload;
+}
+
 template <typename LstmWorkload>
 std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
 {
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index fc5ccfe487..4bd3d3a33d 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -322,7 +322,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager(), modelOptions);
 
     auto workload =
-        CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(factory,
+        CreateConvolution2dWorkloadFastMathTest<ClConvolution2dWorkload, armnn::DataType::Float32>(factory,
                                                                                            graph,
                                                                                            DataLayout::NCHW,
                                                                                            modelOptions);
@@ -331,8 +331,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
     auto conv2dWorkload = PolymorphicDowncast<ClConvolution2dWorkload*>(workload.get());
     IgnoreUnused(conv2dWorkload);
     ARMNN_ASSERT(conv2dWorkload != nullptr);
-    // fast_math enabled but configuration does not match with WINOGRAD
-    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::GEMM);
+    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
 template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 99ff9ae8b8..c994bfe55a 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -292,7 +292,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager(), modelOptions);
 
     auto workload =
-        CreateConvolution2dWorkloadTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
+        CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
                                                                                              graph,
                                                                                              DataLayout::NCHW,
                                                                                              modelOptions);
@@ -301,8 +301,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
     auto conv2dWorkload = PolymorphicDowncast<NeonConvolution2dWorkload*>(workload.get());
    IgnoreUnused(conv2dWorkload);
     ARMNN_ASSERT(conv2dWorkload != nullptr);
-    // fast_math enabled but configuration does not match with WINOGRAD
-    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::GEMM);
+    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
 template <typename armnn::DataType DataType>