//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#pragma once

#include <boost/test/unit_test.hpp>
#include <boost/cast.hpp>

#include "backends/WorkloadData.hpp"
#include "Layers.hpp"
#include "Graph.hpp"

#include <utility>

#include "backends/CpuTensorHandle.hpp"

using namespace armnn;

namespace
{

using namespace std;

// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type
template <typename Workload>
std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const IWorkloadFactory& factory)
{
    std::unique_ptr<IWorkload> workload = layer.CreateWorkload(graph, factory);
    BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
               "Cannot convert to derived class");
    std::string reasonIfUnsupported;
    BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported));
    return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}

// connects two layers
void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo,
             unsigned int fromIndex = 0, unsigned int toIndex = 0)
{
    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
}

// helper function to create tensor handlers for workloads, assuming they all use the same factory
void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory)
{
    for (auto&& layer : graph.TopologicalSort())
    {
        layer->CreateTensorHandles(graph, factory);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////
// The following functions are called by backends/test/CreateWorkload*.cpp.
// They build very simple graphs, and then create a workload.
// Some checks are performed on the workload to ensure parameters have been passed correctly.
// They return the created workloads so that backend-specific checks can be performed.
/////////////////////////////////////////////////////////////////////////////////////////////
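
// For illustration, a backend test would typically drive one of the helpers below along
// these lines. This is a sketch only: the factory and workload types named here are
// placeholders for whichever backend is under test, not types defined in this header.
//
//     BOOST_AUTO_TEST_CASE(CreateActivationWorkload)
//     {
//         armnn::Graph graph;
//         SomeBackendWorkloadFactory factory; // e.g. the reference or Neon factory
//         auto workload = CreateActivationWorkloadTest<SomeBackendActivationWorkload>(factory, graph);
//         // ...backend-specific checks on workload->GetData() go here...
//     }
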
template <typename ActivationWorkload>
std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph)
{
    // create the layer we're testing
    ActivationDescriptor layerDesc;
    layerDesc.m_Function = ActivationFunction::Abs;
    layerDesc.m_A        = 3.5f;
    layerDesc.m_B        = -10.0f;

    ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo tensorInfo({1, 1}, ActivationWorkload::ms_DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, graph, factory);

    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_A == 3.5f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f);
    BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename AdditionWorkload>
std::unique_ptr<AdditionWorkload> CreateAdditionWorkloadTest(armnn::IWorkloadFactory& factory,
                                                             armnn::Graph& graph)
{
    // create the layer we're testing
    Layer* const layer = graph.AddLayer<AdditionLayer>("layer");

    // create extra layers
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo tensorInfo({2, 3}, AdditionWorkload::ms_DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<AdditionWorkload>(*layer, graph, factory);

    AdditionQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename BatchNormalizationFloat32Workload>
std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // create the layer we're testing
    BatchNormalizationDescriptor layerDesc;
    layerDesc.m_Eps = 0.05f;

    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");

    armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
    layer->m_Mean     = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Beta     = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Gamma    = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<BatchNormalizationFloat32Workload>(*layer, graph, factory);

    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo()     == TensorInfo({3}, DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo()    == TensorInfo({3}, DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo()     == TensorInfo({3}, DataType::Float32)));

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename Convolution2dWorkload>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph)
{
    // create the layer we're testing
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft     = 3;
    layerDesc.m_PadRight    = 3;
    layerDesc.m_PadTop      = 1;
    layerDesc.m_PadBottom   = 1;
    layerDesc.m_StrideX     = 2;
    layerDesc.m_StrideY     = 4;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(
        TensorInfo({2, 3, 5, 3}, Convolution2dWorkload::ms_DataType));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(
        TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType)));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, layer, TensorInfo({2, 3, 8, 16}, Convolution2dWorkload::ms_DataType));
    Connect(layer, output, TensorInfo({2, 2, 2, 10}, Convolution2dWorkload::ms_DataType));
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() ==
        TensorInfo({2, 3, 5, 3}, Convolution2dWorkload::ms_DataType)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
        TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType))));

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename Convolution2dWorkload>
std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                             armnn::Graph& graph)
{
    // create the layer we're testing
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft     = 1;
    layerDesc.m_PadRight    = 1;
    layerDesc.m_PadTop      = 1;
    layerDesc.m_PadBottom   = 1;
    layerDesc.m_StrideX     = 1;
    layerDesc.m_StrideY     = 1;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    float inputsQScale = Convolution2dWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = Convolution2dWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(
        TensorInfo({2, 3, 3, 3}, Convolution2dWorkload::ms_DataType, inputsQScale));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(
        TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, layer, TensorInfo({2, 3, 6, 6}, Convolution2dWorkload::ms_DataType, inputsQScale));
    Connect(layer, output, TensorInfo({2, 2, 6, 6}, Convolution2dWorkload::ms_DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() ==
        TensorInfo({2, 3, 3, 3}, Convolution2dWorkload::ms_DataType, inputsQScale)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
        TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType), inputsQScale)));

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename DepthwiseConvolution2dFloat32Workload>
std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // create the layer we're testing
    DepthwiseConvolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft     = 3;
    layerDesc.m_PadRight    = 3;
    layerDesc.m_PadTop      = 1;
    layerDesc.m_PadBottom   = 1;
    layerDesc.m_StrideX     = 2;
    layerDesc.m_StrideY     = 4;
    layerDesc.m_BiasEnabled = true;

    DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
    layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
    Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, graph, factory);

    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({3, 3, 5, 3}, DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({9}, DataType::Float32)));

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename FullyConnectedWorkload>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph)
{
    // create the layer we're testing
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled           = true;
    layerDesc.m_TransposeWeightMatrix = true;

    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");

    float inputsQScale = FullyConnectedWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = FullyConnectedWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 2.0f : 0.0f;

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(
        TensorInfo({7, 20}, FullyConnectedWorkload::ms_DataType, inputsQScale, 0));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(
        TensorInfo({7}, GetBiasDataType(FullyConnectedWorkload::ms_DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, layer, TensorInfo({3, 1, 4, 5}, FullyConnectedWorkload::ms_DataType, inputsQScale));
    Connect(layer, output, TensorInfo({3, 7}, FullyConnectedWorkload::ms_DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, graph, factory);

    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() ==
        TensorInfo({7, 20}, FullyConnectedWorkload::ms_DataType, inputsQScale)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
        TensorInfo({7}, GetBiasDataType(FullyConnectedWorkload::ms_DataType), inputsQScale)));

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename MultiplicationWorkload>
std::unique_ptr<MultiplicationWorkload> CreateMultiplicationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph)
{
    // create the layer we're testing
    Layer* const layer = graph.AddLayer<MultiplicationLayer>("layer");

    // create extra layers
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo tensorInfo({2, 3}, MultiplicationWorkload::ms_DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<MultiplicationWorkload>(*layer, graph, factory);

    MultiplicationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename NormalizationFloat32Workload>
std::unique_ptr<NormalizationFloat32Workload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                              armnn::Graph& graph)
{
    // create the layer we're testing
    NormalizationDescriptor layerDesc;
    layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    layerDesc.m_NormMethodType  = NormalizationAlgorithmMethod::LocalBrightness;
    layerDesc.m_NormSize        = 3;
    layerDesc.m_Alpha           = 0.5f;
    layerDesc.m_Beta            = -1.0f;
    layerDesc.m_K               = 0.2f;

    NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, layer, TensorInfo({3, 5, 5, 1}, armnn::DataType::Float32));
    Connect(layer, output, TensorInfo({3, 5, 5, 1}, armnn::DataType::Float32));
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<NormalizationFloat32Workload>(*layer, graph, factory);

    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
    BOOST_TEST((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
    BOOST_TEST(queueDescriptor.m_Parameters.m_NormSize == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_Beta == -1.0f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_K == 0.2f);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename Pooling2dWorkload>
std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                               armnn::Graph& graph)
{
    // create the layer we're testing
    Pooling2dDescriptor layerDesc;
    layerDesc.m_PoolType            = PoolingAlgorithm::Average;
    layerDesc.m_PoolWidth           = 3;
    layerDesc.m_PoolHeight          = 3;
    layerDesc.m_PadLeft             = 2;
    layerDesc.m_PadRight            = 2;
    layerDesc.m_PadTop              = 1;
    layerDesc.m_PadBottom           = 1;
    layerDesc.m_StrideX             = 2;
    layerDesc.m_StrideY             = 3;
    layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor;

    Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, layer, TensorInfo({3, 2, 5, 5}, Pooling2dWorkload::ms_DataType));
    Connect(layer, output, TensorInfo({3, 2, 2, 4}, Pooling2dWorkload::ms_DataType));
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, graph, factory);

    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
    BOOST_TEST((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolWidth == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolHeight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename SoftmaxWorkload>
std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // create the layer we're testing
    SoftmaxDescriptor softmaxDescriptor;
    Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo tensorInfo({4, 1}, SoftmaxWorkload::ms_DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, graph, factory);
    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename SplitterWorkload>
std::unique_ptr<SplitterWorkload> CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory,
                                                             armnn::Graph& graph)
{
    // create the layer we're testing
    ViewsDescriptor layerDesc(3, 2);
    layerDesc.SetViewOriginCoord(0, 1, 2); // deliberately add these in a weird order
    layerDesc.SetViewOriginCoord(2, 1, 0);
    layerDesc.SetViewOriginCoord(1, 1, 3);

    Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");

    // add extra layers
    Layer* const input   = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");

    // connect up
    armnn::TensorInfo tensorInfo({1, 7}, SplitterWorkload::ms_DataType);
    Connect(input, layer, tensorInfo);

    armnn::TensorInfo output0Info({1, 2}, SplitterWorkload::ms_DataType);
    armnn::TensorInfo output1Info({1, 1}, SplitterWorkload::ms_DataType);
    armnn::TensorInfo output2Info({1, 4}, SplitterWorkload::ms_DataType);

    Connect(layer, output1, output1Info, 1, 0); // deliberately connect these up in a weird order
    Connect(layer, output0, output0Info, 2, 0);
    Connect(layer, output2, output2Info, 0, 0);

    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, graph, factory);

    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins.size() == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 2);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);

    // return so we can do extra, backend-specific tests
    return workload;
}

/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads.
template <typename SplitterWorkload, typename MergerWorkload>
std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
    CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    static_assert(SplitterWorkload::ms_DataType == MergerWorkload::ms_DataType,
                  "Splitter and merger workloads must have the same data type");

    armnn::TensorInfo inputTensorInfo({ 1, 1, 100, 10 }, SplitterWorkload::ms_DataType);
    armnn::TensorInfo splitTensorInfo1({ 1, 1, 60, 10 }, SplitterWorkload::ms_DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 1, 40, 10 }, SplitterWorkload::ms_DataType);

    // construct the graph
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);
    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);
    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 0);
    splitterViews.SetViewOriginCoord(1, 2, 60);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");

    armnn::OriginsDescriptor mergerViews(2);
    mergerViews.SetViewOriginCoord(0, 0, 0);
    mergerViews.SetViewOriginCoord(0, 1, 0);
    mergerViews.SetViewOriginCoord(0, 2, 0);
    mergerViews.SetViewOriginCoord(0, 3, 0);
    mergerViews.SetViewOriginCoord(1, 0, 0);
    mergerViews.SetViewOriginCoord(1, 1, 0);
    mergerViews.SetViewOriginCoord(1, 2, 40);
    mergerViews.SetViewOriginCoord(1, 3, 0);

    Layer* const merger = graph.AddLayer<MergerLayer>(mergerViews, "merger");

    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // add connections
    Connect(input, splitter, inputTensorInfo, 0, 0);
    Connect(splitter, merger, splitTensorInfo1, 0, 1); // the splitter & merger are connected up
    Connect(splitter, merger, splitTensorInfo2, 1, 0); // so that the outputs are flipped round
    Connect(merger, output, inputTensorInfo, 0, 0);

    CreateTensorHandles(graph, factory);

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
    auto workloadMerger   = MakeAndCheckWorkload<MergerWorkload>(*merger, graph, factory);

    return {std::move(workloadSplitter), std::move(workloadMerger)};
}
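
// For illustration, a caller might consume the pair returned above like so (sketch only;
// the workload types are backend-specific placeholders, not types defined in this header):
//
//     armnn::Graph graph;
//     SomeBackendWorkloadFactory factory;
//     auto workloads = CreateSplitterMergerWorkloadTest<SomeSplitterWorkload,
//                                                       SomeMergerWorkload>(factory, graph);
//     auto& splitterWorkload = workloads.first;
//     auto& mergerWorkload   = workloads.second;
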
/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then
/// connected to two different activation layers.
template <typename SplitterWorkload, typename ActivationWorkload>
void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
                                                       std::unique_ptr<SplitterWorkload>& wlSplitter,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv0_0,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv0_1,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv1_0,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv1_1)
{
    static_assert(SplitterWorkload::ms_DataType == ActivationWorkload::ms_DataType,
                  "Splitter and activation workloads must have the same data type");

    armnn::TensorInfo inputTensorInfo({ 1, 1, 100, 10 }, SplitterWorkload::ms_DataType);
    armnn::TensorInfo splitTensorInfo1({ 1, 1, 60, 10 }, SplitterWorkload::ms_DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 1, 40, 10 }, SplitterWorkload::ms_DataType);

    // construct the graph
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);
    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);
    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 0);
    splitterViews.SetViewOriginCoord(1, 2, 60);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");

    armnn::ActivationDescriptor activationDesc;

    Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0");
    Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1");
    Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0");
    Layer* const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1");

    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
    Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
    Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");

    // add connections
    Connect(input, splitter, inputTensorInfo, 0, 0);
    Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
    Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);
    Connect(splitter, activ1_0, splitTensorInfo2, 1, 0);
    Connect(splitter, activ1_1, splitTensorInfo2, 1, 0);
    Connect(activ0_0, output1, splitTensorInfo1, 0, 0);
    Connect(activ0_1, output2, splitTensorInfo1, 0, 0);
    Connect(activ1_0, output3, splitTensorInfo2, 0, 0);
    Connect(activ1_1, output4, splitTensorInfo2, 0, 0);

    CreateTensorHandles(graph, factory);

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
    auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, graph, factory);
    auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, graph, factory);
    auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, graph, factory);
    auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, graph, factory);

    wlSplitter = std::move(workloadSplitter);
    wlActiv0_0 = std::move(workloadActiv0_0);
    wlActiv0_1 = std::move(workloadActiv0_1);
    wlActiv1_0 = std::move(workloadActiv1_0);
    wlActiv1_1 = std::move(workloadActiv1_1);
}
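
// For illustration, the out-parameter helper above might be driven like this (sketch only;
// the workload types are backend-specific placeholders, not types defined in this header):
//
//     std::unique_ptr<SomeSplitterWorkload>   wlSplitter;
//     std::unique_ptr<SomeActivationWorkload> wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1;
//     CreateSplitterMultipleInputsOneOutputWorkloadTest<SomeSplitterWorkload, SomeActivationWorkload>(
//         factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);
//     // each returned workload can then be checked with backend-specific assertions
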
template <typename ResizeBilinearWorkload>
std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph)
{
    // create the layer we're testing
    TensorShape outputShape({ 2, 3, 2, 2 });
    ResizeBilinearDescriptor resizeDesc;
    resizeDesc.m_TargetWidth  = outputShape[3];
    resizeDesc.m_TargetHeight = outputShape[2];
    Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo inputTensorInfo({ 2, 3, 4, 4 }, ResizeBilinearWorkload::ms_DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, ResizeBilinearWorkload::ms_DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory);

    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename L2NormalizationWorkload>
std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                           armnn::Graph& graph)
{
    // create the layer we're testing
    Layer* const layer = graph.AddLayer<L2NormalizationLayer>("l2norm");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo inputTensorInfo({ 5, 20, 50, 67 }, L2NormalizationWorkload::ms_DataType);
    armnn::TensorInfo outputTensorInfo({ 5, 20, 50, 67 }, L2NormalizationWorkload::ms_DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory);

    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

template <typename ReshapeWorkload>
std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // create the layer we're testing
    TensorShape outputShape({ 1, 4 });
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputShape;
    Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");

    // create extra layers
    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo inputTensorInfo({ 4, 1 }, ReshapeWorkload::ms_DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, ReshapeWorkload::ms_DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, graph, factory);

    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // return so we can do extra, backend-specific tests
    return workload;
}

} // anonymous namespace