author    telsoa01 <telmo.soares@arm.com>    2018-08-31 09:22:23 +0100
committer telsoa01 <telmo.soares@arm.com>    2018-08-31 09:22:23 +0100
commit    c577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch)
tree      bd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnn/test/CreateWorkload.hpp
parent    4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff)
download  armnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz
Release 18.08
Diffstat (limited to 'src/armnn/test/CreateWorkload.hpp')
-rw-r--r--  src/armnn/test/CreateWorkload.hpp | 487
1 file changed, 318 insertions(+), 169 deletions(-)
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index c3f4b8a1bf..ee0c584b13 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -22,7 +22,7 @@ namespace
using namespace std;
-// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type
+// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
template<typename Workload>
std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const IWorkloadFactory& factory)
{
@@ -30,18 +30,19 @@ std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const
BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
"Cannot convert to derived class");
std::string reasonIfUnsupported;
+ layer.SetComputeDevice(factory.GetCompute());
BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported));
return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}
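
These helpers are consumed by per-backend test files, which pass in the backend's factory and then assert backend-specific properties on the returned workload. A minimal sketch of such a caller, assuming the in-tree reference backend's RefWorkloadFactory and RefActivationFloat32Workload (the test-case name itself is hypothetical):

    BOOST_AUTO_TEST_CASE(CreateActivationWorkloadRef)
    {
        armnn::Graph graph;
        armnn::RefWorkloadFactory factory;
        // The helper builds the graph, runs MakeAndCheckWorkload (downcast and
        // IsLayerSupported checks), and returns the typed workload.
        auto workload = CreateActivationWorkloadTest<RefActivationFloat32Workload,
                                                     armnn::DataType::Float32>(factory, graph);
        // Backend-specific checks on the returned workload go here.
        BOOST_TEST((workload != nullptr));
    }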
-// connects two layers
+// Connects two layers.
void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
{
from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
}
-// helper function to create tensor handlers for workloads, assuming they all use the same factory
+// Helper function to create tensor handlers for workloads, assuming they all use the same factory.
void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory)
{
for (auto&& layer : graph.TopologicalSort())
@@ -57,11 +58,11 @@ void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory)
// They return the created workloads so that backend-specific checks can be performed.
/////////////////////////////////////////////////////////////////////////////////////////////
-template <typename ActivationWorkload>
+template <typename ActivationWorkload, armnn::DataType DataType>
std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
ActivationDescriptor layerDesc;
layerDesc.m_Function = ActivationFunction::Abs;
layerDesc.m_A = 3.5f;
@@ -69,19 +70,19 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo tensorInfo({1, 1}, ActivationWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo tensorInfo({1, 1}, DataType);
Connect(input, layer, tensorInfo);
Connect(layer, output, tensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, graph, factory);
ActivationQueueDescriptor queueDescriptor = workload->GetData();
@@ -91,51 +92,51 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f);
BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
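
The recurring change in this diff is the move from a per-workload static data type (Workload::ms_DataType) to an explicit armnn::DataType non-type template parameter. Call sites change accordingly; a hedged before/after sketch, using the Neon float workload name purely as an illustration:

    // Before: the data type was baked into the workload type.
    auto oldStyle = CreateActivationWorkloadTest<NeonActivationFloat32Workload>(factory, graph);
    // After: the caller names the data type explicitly, so the same helper
    // can be instantiated for Float32, Float16, or QuantisedAsymm8 workloads.
    auto newStyle = CreateActivationWorkloadTest<NeonActivationFloat32Workload,
                                                 armnn::DataType::Float32>(factory, graph);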
-template <typename AdditionWorkload>
+template <typename AdditionWorkload, armnn::DataType DataType>
std::unique_ptr<AdditionWorkload> CreateAdditionWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
Layer* const layer = graph.AddLayer<AdditionLayer>("layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo tensorInfo({2, 3}, AdditionWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
Connect(input1, layer, tensorInfo, 0, 0);
Connect(input2, layer, tensorInfo, 0, 1);
Connect(layer, output, tensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<AdditionWorkload>(*layer, graph, factory);
AdditionQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename BatchNormalizationFloat32Workload>
+template <typename BatchNormalizationFloat32Workload, armnn::DataType DataType>
std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest(
armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
BatchNormalizationDescriptor layerDesc;
layerDesc.m_Eps = 0.05f;
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
- armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
+ armnn::TensorInfo weightInfo({3}, DataType);
layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
@@ -145,37 +146,37 @@ std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkl
layer->m_Beta->Allocate();
layer->m_Gamma->Allocate();
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3, 1, 1}, DataType);
Connect(input, layer, tensorInfo);
Connect(layer, output, tensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<BatchNormalizationFloat32Workload>(*layer, graph, factory);
BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType::Float32)));
- BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType::Float32)));
- BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType::Float32)));
- BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType::Float32)));
+ BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename Convolution2dWorkload>
+template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
Convolution2dDescriptor layerDesc;
layerDesc.m_PadLeft = 3;
layerDesc.m_PadRight = 3;
@@ -187,24 +188,22 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
- Convolution2dWorkload::ms_DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
- (TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType)));
+ layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3}, DataType));
+ layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- Connect(input, layer, TensorInfo({2, 3, 8, 16}, Convolution2dWorkload::ms_DataType));
- Connect(layer, output, TensorInfo({2, 2, 2, 10}, Convolution2dWorkload::ms_DataType));
+ // Connects up.
+ Connect(input, layer, TensorInfo({2, 3, 8, 16}, DataType));
+ Connect(layer, output, TensorInfo({2, 2, 2, 10}, DataType));
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
@@ -218,20 +217,123 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 5, 3},
- Convolution2dWorkload::ms_DataType)));
+ BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 5, 3}, DataType)));
BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
- TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType))));
+ TensorInfo({2}, GetBiasDataType(DataType))));
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename Convolution2dWorkload>
+template <typename LstmWorkload>
+std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // These parameters are for the withCifgWithPeepholeNoProjection configuration.
+ LstmDescriptor layerDesc;
+ layerDesc.m_ActivationFunc = 4;
+ layerDesc.m_ClippingThresCell = 0.0f;
+ layerDesc.m_ClippingThresProj = 0.0f;
+ layerDesc.m_CifgEnabled = true;
+ layerDesc.m_PeepholeEnabled = true;
+ layerDesc.m_ProjectionEnabled = false;
+
+ LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
+ unsigned int batchSize = 2;
+ unsigned int inputSize = 2;
+ unsigned int numUnits = 4;
+ unsigned int outputSize = 4;
+
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits, inputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits, inputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits, inputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits, outputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits, outputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits, outputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+
+ layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
+ layer->m_BasicParameters.m_InputToCellWeights->Allocate();
+ layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
+ layer->m_BasicParameters.m_ForgetGateBias->Allocate();
+ layer->m_BasicParameters.m_CellBias->Allocate();
+ layer->m_BasicParameters.m_OutputGateBias->Allocate();
+
+ if (layerDesc.m_PeepholeEnabled)
+ {
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
+ layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
+ }
+
+ // Creates input and output layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
+ Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
+ Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
+ Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
+ Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
+ Layer* const output = graph.AddLayer<OutputLayer>(3, "output");
+
+ // Connects up.
+ armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
+ armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
+ armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
+ armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits*3 }, DataType::Float32);
+ if (layerDesc.m_CifgEnabled)
+ {
+ lstmTensorInfoScratchBuff.SetShape({ batchSize, numUnits*4 });
+ }
+
+ Connect(input, layer, lstmTensorInfo1, 0, 0);
+ Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
+ Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
+ Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
+ Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
+ Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
+ Connect(layer, output, lstmTensorInfo3, 3, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, graph, factory);
+ LstmQueueDescriptor queueDescriptor = workload->GetData();
+ BOOST_TEST(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 4);
+
+ BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
+ DataType::Float32)));
+ BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
+ DataType::Float32)));
+ BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
+ return workload;
+}
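
Note that, unlike the surrounding helpers, CreateLstmWorkloadTest takes no DataType parameter: every tensor in its graph is hard-wired to Float32. A hedged usage sketch, with BackendLstmFloat32Workload standing in for whichever backend workload type is under test:

    auto workload = CreateLstmWorkloadTest<BackendLstmFloat32Workload>(factory, graph);
    LstmQueueDescriptor queueDescriptor = workload->GetData();
    // With CIFG enabled, the optional input-gate tensors are expected to stay
    // unset; the member name below follows the LstmQueueDescriptor convention.
    BOOST_TEST((queueDescriptor.m_InputToInputWeights == nullptr));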
+
+template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
Convolution2dDescriptor layerDesc;
layerDesc.m_PadLeft = 1;
layerDesc.m_PadRight = 1;
@@ -243,26 +345,25 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- float inputsQScale = Convolution2dWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = Convolution2dWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
+ float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 },
- Convolution2dWorkload::ms_DataType, inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
- (TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType), inputsQScale));
+ (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- Connect(input, layer, TensorInfo({2, 3, 6, 6}, Convolution2dWorkload::ms_DataType, inputsQScale));
- Connect(layer, output, TensorInfo({2, 2, 6, 6}, Convolution2dWorkload::ms_DataType, outputQScale));
+ // Connects up.
+ Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale));
+ Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale));
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
@@ -277,11 +378,11 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
- Convolution2dWorkload::ms_DataType, inputsQScale)));
+ DataType, inputsQScale)));
BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo()
- == TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType), inputsQScale)));
+ == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)));
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
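
The quantization handling above relies on GetBiasDataType to pair an input data type with its bias type. As far as the in-tree helper behaves, float types map to themselves while QuantisedAsymm8 maps to Signed32 (quantized biases are 32-bit accumulators); a small sketch of that assumed mapping:

    // Assumed mapping; both assertions mirror how the tests above size their biases.
    BOOST_TEST((GetBiasDataType(armnn::DataType::Float32) == armnn::DataType::Float32));
    BOOST_TEST((GetBiasDataType(armnn::DataType::QuantisedAsymm8) == armnn::DataType::Signed32));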
@@ -289,7 +390,7 @@ template <typename DepthwiseConvolution2dFloat32Workload>
std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
DepthwiseConvolution2dDescriptor layerDesc;
layerDesc.m_PadLeft = 3;
layerDesc.m_PadRight = 3;
@@ -306,16 +407,16 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
+ // Connects up.
Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, graph, factory);
DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
@@ -332,41 +433,39 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({3, 3, 5, 3}, DataType::Float32)));
BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({9}, DataType::Float32)));
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename FullyConnectedWorkload>
+template <typename FullyConnectedWorkload, armnn::DataType DataType>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
FullyConnectedDescriptor layerDesc;
layerDesc.m_BiasEnabled = true;
layerDesc.m_TransposeWeightMatrix = true;
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = FullyConnectedWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = FullyConnectedWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
+ float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
- FullyConnectedWorkload::ms_DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
- GetBiasDataType(FullyConnectedWorkload::ms_DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, FullyConnectedWorkload::ms_DataType, inputsQScale));
- Connect(layer, output, TensorInfo({3, 7}, FullyConnectedWorkload::ms_DataType, outputQScale));
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale));
+ Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, graph, factory);
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
@@ -375,50 +474,48 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() ==
- TensorInfo({7, 20}, FullyConnectedWorkload::ms_DataType, inputsQScale)));
- BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
- TensorInfo({7}, GetBiasDataType(FullyConnectedWorkload::ms_DataType), inputsQScale)));
+ BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
+ BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename MultiplicationWorkload>
+template <typename MultiplicationWorkload, armnn::DataType DataType>
std::unique_ptr<MultiplicationWorkload> CreateMultiplicationWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
Layer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo tensorInfo({2, 3}, MultiplicationWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
Connect(input1, layer, tensorInfo, 0, 0);
Connect(input2, layer, tensorInfo, 0, 1);
Connect(layer, output, tensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<MultiplicationWorkload>(*layer, graph, factory);
MultiplicationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename NormalizationFloat32Workload>
+template <typename NormalizationFloat32Workload, armnn::DataType DataType>
std::unique_ptr<NormalizationFloat32Workload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
NormalizationDescriptor layerDesc;
layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
@@ -429,16 +526,16 @@ std::unique_ptr<NormalizationFloat32Workload> CreateNormalizationWorkloadTest(ar
NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- Connect(input, layer, TensorInfo({3, 5, 5, 1}, armnn::DataType::Float32));
- Connect(layer, output, TensorInfo({3, 5, 5, 1}, armnn::DataType::Float32));
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 5, 5, 1}, DataType));
+ Connect(layer, output, TensorInfo({3, 5, 5, 1}, DataType));
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<NormalizationFloat32Workload>(*layer, graph, factory);
NormalizationQueueDescriptor queueDescriptor = workload->GetData();
@@ -452,15 +549,15 @@ std::unique_ptr<NormalizationFloat32Workload> CreateNormalizationWorkloadTest(ar
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename Pooling2dWorkload>
+template <typename Pooling2dWorkload, armnn::DataType DataType>
std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
Pooling2dDescriptor layerDesc;
layerDesc.m_PoolType = PoolingAlgorithm::Average;
layerDesc.m_PoolWidth = 3;
@@ -475,16 +572,16 @@ std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadF
Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- Connect(input, layer, TensorInfo({3, 2, 5, 5}, Pooling2dWorkload::ms_DataType));
- Connect(layer, output, TensorInfo({3, 2, 2, 4}, Pooling2dWorkload::ms_DataType));
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 2, 5, 5}, DataType));
+ Connect(layer, output, TensorInfo({3, 2, 2, 4}, DataType));
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, graph, factory);
Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
@@ -502,70 +599,70 @@ std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadF
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename SoftmaxWorkload>
+template <typename SoftmaxWorkload, armnn::DataType DataType>
std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
SoftmaxDescriptor softmaxDescriptor;
Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo tensorInfo({4, 1}, SoftmaxWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo tensorInfo({4, 1}, DataType);
Connect(input, layer, tensorInfo);
Connect(layer, output, tensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, graph, factory);
SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template<typename SplitterWorkload>
+template<typename SplitterWorkload, armnn::DataType DataType>
std::unique_ptr<SplitterWorkload>
CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
// NOTE: need three dimensions channels, height/y, width/x because the Compute
// library restricts subtensors to have the same x and y dimensions as
// their parent tensors, and therefore the origin on the x and y dimension
// has to be zero for any view. So we need a third dimension to split...
- // NOTE: arguments are: number of views, number of dimensions
+ // NOTE: arguments are: number of views, number of dimensions.
ViewsDescriptor layerDesc(3, 3);
- // NOTE: arguments are: view, dimension, value
+ // NOTE: arguments are: view, dimension, value.
layerDesc.SetViewOriginCoord(0, 0, 0);
layerDesc.SetViewOriginCoord(1, 0, 1);
layerDesc.SetViewOriginCoord(2, 0, 3);
Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");
- // add extra layers
+ // Adds extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
- // connect up
- armnn::TensorInfo tensorInfo({5, 7, 7}, SplitterWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo tensorInfo({5, 7, 7}, DataType);
Connect(input, layer, tensorInfo);
- armnn::TensorInfo output0Info({1, 7, 7}, SplitterWorkload::ms_DataType);
- armnn::TensorInfo output1Info({2, 7, 7}, SplitterWorkload::ms_DataType);
- armnn::TensorInfo output2Info({2, 7, 7}, SplitterWorkload::ms_DataType);
+ armnn::TensorInfo output0Info({1, 7, 7}, DataType);
+ armnn::TensorInfo output1Info({2, 7, 7}, DataType);
+ armnn::TensorInfo output2Info({2, 7, 7}, DataType);
Connect(layer, output0, output0Info, 0, 0);
Connect(layer, output1, output1Info, 1, 0);
@@ -573,7 +670,7 @@ std::unique_ptr<SplitterWorkload>
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, graph, factory);
SplitterQueueDescriptor queueDescriptor = workload->GetData();
@@ -591,24 +688,21 @@ std::unique_ptr<SplitterWorkload>
BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
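
The ViewsDescriptor bookkeeping is easier to read in isolation. A standalone sketch of the same three-way split along the channel dimension, with values mirroring the test above:

    armnn::ViewsDescriptor desc(3, 3);    // 3 views over a 3-dimensional tensor
    desc.SetViewOriginCoord(0, 0, 0);     // view 0 starts at channel 0 -> shape {1, 7, 7}
    desc.SetViewOriginCoord(1, 0, 1);     // view 1 starts at channel 1 -> shape {2, 7, 7}
    desc.SetViewOriginCoord(2, 0, 3);     // view 2 starts at channel 3 -> shape {2, 7, 7}
    // Origins on dimensions 1 and 2 stay zero: the Compute Library restricts
    // subtensors to the full x/y extent of their parent, as noted above.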
-/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads
-template<typename SplitterWorkload, typename MergerWorkload>
+/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads.
+template<typename SplitterWorkload, typename MergerWorkload, armnn::DataType DataType>
std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
- static_assert(SplitterWorkload::ms_DataType == MergerWorkload::ms_DataType,
- "Splitter and merger workloads must have the same data type");
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);
- armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, SplitterWorkload::ms_DataType);
+ armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType);
+ armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType);
- armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, SplitterWorkload::ms_DataType);
- armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, SplitterWorkload::ms_DataType);
-
- //construct the graph
+ // Constructs the graph.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
armnn::ViewsDescriptor splitterViews(2);
@@ -641,12 +735,12 @@ std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // add connections
+ // Adds connections.
Connect(input, splitter, inputTensorInfo, 0, 0);
BOOST_TEST_CHECKPOINT("connect input to splitter");
- Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up
+ Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up.
BOOST_TEST_CHECKPOINT("connect splitter[0] to merger[1]");
- Connect(splitter, merger, splitTensorInfo2, 1, 0); // so that the outputs are flipped round
+ Connect(splitter, merger, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
BOOST_TEST_CHECKPOINT("connect splitter[1] to merger[0]");
Connect(merger, output, inputTensorInfo, 0, 0);
BOOST_TEST_CHECKPOINT("connect merger to output");
@@ -665,7 +759,7 @@ std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then
/// connected to two different activation layers
-template<typename SplitterWorkload, typename ActivationWorkload>
+template<typename SplitterWorkload, typename ActivationWorkload, armnn::DataType DataType>
void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
std::unique_ptr<SplitterWorkload>& wlSplitter,
std::unique_ptr<ActivationWorkload>& wlActiv0_0,
@@ -673,14 +767,11 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
std::unique_ptr<ActivationWorkload>& wlActiv1_0,
std::unique_ptr<ActivationWorkload>& wlActiv1_1)
{
- static_assert(SplitterWorkload::ms_DataType == ActivationWorkload::ms_DataType,
- "Splitter and activation workloads must have the same data type");
-
- armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, SplitterWorkload::ms_DataType);
- armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, SplitterWorkload::ms_DataType);
- armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, SplitterWorkload::ms_DataType);
+ armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType);
+ armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType);
+ armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType);
- //construct the graph
+ // Constructs the graph.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
armnn::ViewsDescriptor splitterViews(2);
@@ -709,7 +800,7 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");
- // add connections
+ // Adds connections.
Connect(input, splitter, inputTensorInfo, 0, 0);
Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);
@@ -737,97 +828,155 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
wlActiv1_1 = std::move(workloadActiv1_1);
}
-template <typename ResizeBilinearWorkload>
+template <typename ResizeBilinearWorkload, armnn::DataType DataType>
std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
TensorShape outputShape({ 2, 3, 2, 2 });
ResizeBilinearDescriptor resizeDesc;
resizeDesc.m_TargetWidth = outputShape[3];
resizeDesc.m_TargetHeight = outputShape[2];
Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo inputTensorInfo({ 2, 3, 4, 4 }, ResizeBilinearWorkload::ms_DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, ResizeBilinearWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({ 2, 3, 4, 4 }, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
Connect(input, layer, inputTensorInfo);
Connect(layer, output, outputTensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory);
ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename L2NormalizationWorkload>
+template <typename L2NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
Layer* const layer = graph.AddLayer<L2NormalizationLayer>("l2norm");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo inputTensorInfo({ 5, 20, 50, 67 }, L2NormalizationWorkload::ms_DataType);
- armnn::TensorInfo outputTensorInfo({ 5, 20, 50, 67 }, L2NormalizationWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({ 5, 20, 50, 67 }, DataType);
+ armnn::TensorInfo outputTensorInfo({ 5, 20, 50, 67 }, DataType);
Connect(input, layer, inputTensorInfo);
Connect(layer, output, outputTensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory);
L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
-template <typename ReshapeWorkload>
+template <typename ReshapeWorkload, armnn::DataType DataType>
std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
{
- // create the layer we're testing
+ // Creates the layer we're testing.
TensorShape outputShape({ 1, 4 });
ReshapeDescriptor reshapeDesc;
reshapeDesc.m_TargetShape = outputShape;
Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");
- // create extra layers
+ // Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- // connect up
- armnn::TensorInfo inputTensorInfo({ 4, 1 }, ReshapeWorkload::ms_DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, ReshapeWorkload::ms_DataType);
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
Connect(input, layer, inputTensorInfo);
Connect(layer, output, outputTensorInfo);
CreateTensorHandles(graph, factory);
- // make the workload and check it
+ // Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, graph, factory);
ReshapeQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- // return so we can do extra, backend-specific tests
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename ConvertFp16ToFp32Float32Workload>
+std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32WorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ ConvertFp16ToFp32Layer* const layer = graph.AddLayer<ConvertFp16ToFp32Layer>("Fp16ToFp32Converter");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+ armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, graph, factory);
+
+ ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename ConvertFp32ToFp16Float16Workload>
+std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16WorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ ConvertFp32ToFp16Layer* const layer = graph.AddLayer<ConvertFp32ToFp16Layer>("Fp32ToFp16Converter");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, graph, factory);
+
+ ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
return workload;
}
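
The two conversion helpers round out the FP16 support in this release; each fixes its input and output types instead of taking a DataType parameter. A hedged caller sketch, where RefConvertFp16ToFp32Workload is assumed from the backend naming scheme rather than quoted from the tree:

    armnn::Graph graph;
    armnn::RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
    // The descriptor carries exactly one Float16 input and one Float32 output.
    ConvertFp16ToFp32QueueDescriptor descriptor = workload->GetData();
    BOOST_TEST(descriptor.m_Inputs.size() == 1);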