diff options
author | James Conroy <james.conroy@arm.com> | 2021-04-27 17:13:27 +0100 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2021-05-06 14:40:40 +0000 |
commit | 1f58f03d82c482626b1b4673b6c0e25da4338fb5 (patch) | |
tree | e92451e00d459a2fc0d870694460f482aa4c77ae /src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | |
parent | a7a12f5c3654da554ad6197beff0f0fc54681c92 (diff) | |
download | armnn-1f58f03d82c482626b1b4673b6c0e25da4338fb5.tar.gz |
IVGCVSW-5815 Generalise ConstCpuTensorHandle
* Generalises ConstCpuTensorHandle and inherited
classes by removing 'Cpu' from aliases.
* New renamed classes: ConstTensorHandle, TensorHandle,
ScopedTensorHandle, PassthroughTensorHandle,
ConstPassthroughTensorHandle.
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
Diffstat (limited to 'src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 106 |
1 file changed, 53 insertions(+), 53 deletions(-)
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index b73efbe26c..4240bb1061 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -83,13 +83,13 @@ struct DummyLayer<armnn::BatchNormalizationLayer> DummyLayer() { m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), ""); - m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); } @@ -240,9 +240,9 @@ struct DummyConvolutionLayer desc.m_StrideX = 1; desc.m_StrideY = 1; m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, ""); - m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); } @@ -278,7 +278,7 @@ struct 
DummyLayer<armnn::DetectionPostProcessLayer> DummyLayer() { m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), ""); - m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); } @@ -299,30 +299,30 @@ struct DummyLstmLayer desc.m_CifgEnabled = false; m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), ""); - m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = 
std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); - m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); } @@ -354,57 +354,57 @@ struct DummyQLstmLayer m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm"); // Basic params - 
m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); - m_Layer->m_BasicParameters.m_CellBias = 
std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); - m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); // CIFG optional params - m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); // Projection optional params - m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8)); - m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); // Peephole optional params - m_Layer->m_PeepholeParameters.m_CellToInputWeights = 
std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); - m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); - m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); // Layer normalization optional params - m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); - m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); - m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); - m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16)); } @@ 
-423,31 +423,31 @@ struct DummyLayer<armnn::QuantizedLstmLayer, void> { m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>(""); - m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = 
std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8)); - m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); - m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); - m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); - m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>( armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32)); } @@ -466,7 +466,7 @@ struct DummyLayer<armnn::FullyConnectedLayer> { armnn::FullyConnectedLayer::DescriptorType desc; m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, ""); - m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>( + m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>( 
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); } |