author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-02-11 12:21:27 +0000
committer  Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-02-11 13:33:20 +0000
commit     0085978ac40ecd008195d635cd009a1d4f49fb74 (patch)
tree       560c296e74b94826d6338b7d0d92224ae526a426 /src/armnn/test
parent     3dad5acc5d8eda6fc472b9a255c1d893d4e1f942 (diff)
download   armnn-0085978ac40ecd008195d635cd009a1d4f49fb74.tar.gz
IVGCVSW-2676 Make biases optional in ILayerVisitor for Convolution2D, DepthwiseConvolution2D and FullyConnected
Change-Id: I3048504ff699fdb266488e7c07b7262e5843d4b0
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
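In practice the change means the bias tensor now travels through the layer visitor interface as an armnn::Optional<ConstTensor>: callers pass EmptyOptional() when a layer has no biases and Optional<ConstTensor>(biases) when it does, instead of relying on separate *WithBias* visitor classes. The following is a minimal sketch of the new call shape, distilled from the updated Convolution2d test cases below; it assumes the Arm NN public headers and the test-only ConstTensorLayerVisitor.hpp are on the include path, so treat it as illustrative rather than a drop-in test case.

    // Sketch only: condensed from the Convolution2d test cases in this commit.
    #include <armnn/ArmNN.hpp>
    #include <vector>
    #include "ConstTensorLayerVisitor.hpp" // test-only helper visitors

    void SketchOptionalBiases()
    {
        using namespace armnn;

        Convolution2dDescriptor descriptor;

        std::vector<float>        weightData = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
        std::vector<unsigned int> weightDims = {1, 1, 3, 3};
        ConstTensor weights(TensorInfo(4, weightDims.data(), DataType::Float32), weightData);

        std::vector<float>        biasData = weightData;
        std::vector<unsigned int> biasDims = {1, 1, 3, 3};
        ConstTensor biases(TensorInfo(4, biasDims.data(), DataType::Float32), biasData);

        INetworkPtr net = INetwork::Create();

        // No biases: pass EmptyOptional() where a ConstTensor bias used to be required.
        TestConvolution2dLayerVisitor noBiasVisitor(descriptor, weights, EmptyOptional());
        IConnectableLayer* convNoBias = net->AddConvolution2dLayer(descriptor, weights);
        convNoBias->Accept(noBiasVisitor);

        // Biases present: the tensor is wrapped in an Optional<ConstTensor>.
        TestConvolution2dLayerVisitor withBiasVisitor(descriptor, weights, Optional<ConstTensor>(biases));
        IConnectableLayer* convWithBias = net->AddConvolution2dLayer(descriptor, weights, biases);
        convWithBias->Accept(withBiasVisitor);
    }

The same Optional<ConstTensor> pattern applies to the DepthwiseConvolution2d and FullyConnected visitors, which is why the dedicated *WithBias* test visitor classes are deleted in the diff below.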
Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp  481
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.hpp  117
-rw-r--r--  src/armnn/test/QuantizerTest.cpp              70
-rw-r--r--  src/armnn/test/TestLayerVisitor.cpp           10
-rw-r--r--  src/armnn/test/TestLayerVisitor.hpp          233
5 files changed, 405 insertions, 506 deletions
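The diffstat also lists TestLayerVisitor.cpp/.hpp, whose hunks are not reproduced in this excerpt; the ConstTensorLayerVisitor.hpp diff below shows the tests delegating to a new CheckOptionalConstTensors helper from that base class. One plausible shape for that helper is sketched here purely for orientation; it is an assumption, not the actual Arm NN implementation.

    // Assumed sketch of the helper the tests call below; the real
    // TestLayerVisitor change is not shown in this excerpt.
    void TestLayerVisitor::CheckOptionalConstTensors(const armnn::Optional<armnn::ConstTensor>& expected,
                                                     const armnn::Optional<armnn::ConstTensor>& actual)
    {
        // Both sides must agree on whether a bias tensor is present at all.
        BOOST_CHECK(expected.has_value() == actual.has_value());

        // When both carry a tensor, reuse the existing element-wise comparison.
        if (expected.has_value() && actual.has_value())
        {
            CheckConstTensors(expected.value(), actual.value());
        }
    }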
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index 6ab2ea89a2..5b77ddeb97 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -122,11 +122,11 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestConvolution2dLayerVisitor visitor(descriptor, weights);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights);
layer->Accept(visitor);
@@ -146,11 +146,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, layerName);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, layerName);
layer->Accept(visitor);
@@ -170,16 +170,15 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases);
-
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases));
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases);
layer->Accept(visitor);
@@ -200,15 +199,15 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases, layerName);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases, layerName);
layer->Accept(visitor);
@@ -227,11 +226,11 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights);
layer->Accept(visitor);
@@ -251,11 +250,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, layerName);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, layerName);
layer->Accept(visitor);
@@ -275,15 +274,15 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestDepthwiseConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases));
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
layer->Accept(visitor);
@@ -304,15 +303,15 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestDepthwiseConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases, layerName);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases, layerName);
layer->Accept(visitor);
@@ -325,11 +324,11 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestFullyConnectedLayerVistor visitor(descriptor, weights);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional());
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights);
layer->Accept(visitor);
@@ -343,11 +342,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, layerName);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional(), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, layerName);
layer->Accept(visitor);
@@ -361,15 +360,15 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestFullyConnectedLayerWithBiasesVisitor visitor(descriptor, weights, biases);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, Optional<ConstTensor>(biases));
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases);
layer->Accept(visitor);
@@ -384,15 +383,15 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestFullyConnectedLayerWithBiasesVisitor visitor(descriptor, weights, biases, layerName);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases, layerName);
layer->Accept(visitor);
@@ -406,23 +405,23 @@ BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor mean(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
- armnn::ConstTensor variance(TensorInfo(4, varianceDimensions.data(), armnn::DataType::Float32), varianceData);
+ ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor beta(TensorInfo(4, betaDimensions.data(), armnn::DataType::Float32), betaData);
+ ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), armnn::DataType::Float32), gammaData);
+ ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
layer->Accept(visitor);
@@ -437,23 +436,23 @@ BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor mean(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
- armnn::ConstTensor variance(TensorInfo(4, varianceDimensions.data(), armnn::DataType::Float32), varianceData);
+ ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor beta(TensorInfo(4, betaDimensions.data(), armnn::DataType::Float32), betaData);
+ ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), armnn::DataType::Float32), gammaData);
+ ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
descriptor, mean, variance, beta, gamma, layerName);
@@ -464,11 +463,11 @@ BOOST_AUTO_TEST_CASE(CheckConstLayer)
{
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor input(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
TestConstantLayerVisitor visitor(input);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConstantLayer(input);
layer->Accept(visitor);
@@ -479,11 +478,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
const char* layerName = "ConstantLayer";
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor input(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
TestConstantLayerVisitor visitor(input, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
layer->Accept(visitor);
@@ -499,48 +498,48 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -555,7 +554,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -572,48 +571,48 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -628,7 +627,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -644,68 +643,68 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), armnn::DataType::Float32), inputToInputWeightsData);
+ ConstTensor inputToInputWeights(
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(TensorInfo(
+ 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToInputWeights(
- TensorInfo(4, cellToInputWeightsDimensions.data(), armnn::DataType::Float32), cellToInputWeightsData);
+ ConstTensor cellToInputWeights(
+ TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), armnn::DataType::Float32), inputGateBiasData);
+ ConstTensor inputGateBias(
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -725,7 +724,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -742,68 +741,68 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), armnn::DataType::Float32), inputToInputWeightsData);
+ ConstTensor inputToInputWeights(
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(TensorInfo(
+ 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToInputWeights(
- TensorInfo(4, cellToInputWeightsDimensions.data(), armnn::DataType::Float32), cellToInputWeightsData);
+ ConstTensor cellToInputWeights(
+ TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), armnn::DataType::Float32), inputGateBiasData);
+ ConstTensor inputGateBias(
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -823,7 +822,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -841,58 +840,58 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), armnn::DataType::Float32), cellToForgetWeightsData);
+ ConstTensor cellToForgetWeights(
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), armnn::DataType::Float32), cellToOutputWeightsData);
+ ConstTensor cellToOutputWeights(
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -910,7 +909,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -928,58 +927,58 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), armnn::DataType::Float32), cellToForgetWeightsData);
+ ConstTensor cellToForgetWeights(
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), armnn::DataType::Float32), cellToOutputWeightsData);
+ ConstTensor cellToOutputWeights(
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -997,7 +996,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -1015,58 +1014,58 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionBias(
- TensorInfo(4, projectionBiasDimensions.data(), armnn::DataType::Float32), projectionBiasData);
+ ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionWeights(
- TensorInfo(4, projectionWeightsDimensions.data(), armnn::DataType::Float32), projectionWeightsData);
+ ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1084,7 +1083,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1102,58 +1101,58 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionBias(
- TensorInfo(4, projectionBiasDimensions.data(), armnn::DataType::Float32), projectionBiasData);
+ ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionWeights(
- TensorInfo(4, projectionWeightsDimensions.data(), armnn::DataType::Float32), projectionWeightsData);
+ ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1171,7 +1170,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 3b0f723542..513a471465 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -16,58 +16,34 @@ class TestConvolution2dLayerVisitor : public TestLayerVisitor
public:
explicit TestConvolution2dLayerVisitor(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) : TestLayerVisitor(name),
m_Descriptor(convolution2dDescriptor),
- m_Weights(weights) {};
+ m_Weights(weights),
+ m_Biases(biases) {};
virtual ~TestConvolution2dLayerVisitor() {};
void VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(convolution2dDescriptor);
CheckConstTensors(m_Weights, weights);
+ CheckOptionalConstTensors(m_Biases, biases);
}
protected:
void CheckDescriptor(const Convolution2dDescriptor& convolution2dDescriptor);
private:
- armnn::Convolution2dDescriptor m_Descriptor;
- armnn::ConstTensor m_Weights;
-};
-
-class TestConvolution2dWithBiasLayerVisitor : public TestConvolution2dLayerVisitor
-{
-public:
- explicit TestConvolution2dWithBiasLayerVisitor(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) :
- TestConvolution2dLayerVisitor(
- convolution2dDescriptor, weights, name),
- m_Biases(biases) {};
-
- // needed to suppress crappy error message about base class function i.e. version
- // without the biases argument being hidden
- using TestConvolution2dLayerVisitor::VisitConvolution2dLayer;
-
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override
- {
- TestConvolution2dLayerVisitor::VisitConvolution2dLayer(layer, convolution2dDescriptor, weights, name);
- CheckConstTensors(m_Biases, biases);
- }
-
-private:
- armnn::ConstTensor m_Biases;
+ Convolution2dDescriptor m_Descriptor;
+ ConstTensor m_Weights;
+ Optional<ConstTensor> m_Biases;
};
class TestDepthwiseConvolution2dLayerVisitor : public TestLayerVisitor
@@ -75,60 +51,34 @@ class TestDepthwiseConvolution2dLayerVisitor : public TestLayerVisitor
public:
explicit TestDepthwiseConvolution2dLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) : TestLayerVisitor(name),
m_Descriptor(descriptor),
- m_Weights(weights) {};
+ m_Weights(weights),
+ m_Biases(biases) {};
virtual ~TestDepthwiseConvolution2dLayerVisitor() {};
void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(convolution2dDescriptor);
CheckConstTensors(m_Weights, weights);
+ CheckOptionalConstTensors(m_Biases, biases);
}
protected:
void CheckDescriptor(const DepthwiseConvolution2dDescriptor& convolution2dDescriptor);
private:
- armnn::DepthwiseConvolution2dDescriptor m_Descriptor;
- armnn::ConstTensor m_Weights;
-};
-
-class TestDepthwiseConvolution2dWithBiasLayerVisitor : public TestDepthwiseConvolution2dLayerVisitor
-{
-public:
- explicit TestDepthwiseConvolution2dWithBiasLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) :
- TestDepthwiseConvolution2dLayerVisitor(descriptor, weights, name),
- m_Biases(biases) {};
-
- ~TestDepthwiseConvolution2dWithBiasLayerVisitor() {};
-
- // needed to suppress crappy error message about base class function i.e. version
- // without the biases argument being hidden
- using TestDepthwiseConvolution2dLayerVisitor::VisitDepthwiseConvolution2dLayer;
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override
- {
- TestDepthwiseConvolution2dLayerVisitor::VisitDepthwiseConvolution2dLayer(
- layer, convolution2dDescriptor, weights, name);
- CheckConstTensors(m_Biases, biases);
- }
-
-private:
- armnn::ConstTensor m_Biases;
+ DepthwiseConvolution2dDescriptor m_Descriptor;
+ ConstTensor m_Weights;
+ Optional<ConstTensor> m_Biases;
};
class TestFullyConnectedLayerVistor : public TestLayerVisitor
@@ -136,21 +86,25 @@ class TestFullyConnectedLayerVistor : public TestLayerVisitor
public:
explicit TestFullyConnectedLayerVistor(const FullyConnectedDescriptor& descriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor> biases,
const char* name = nullptr) : TestLayerVisitor(name),
m_Descriptor(descriptor),
- m_Weights(weights) {};
+ m_Weights(weights),
+ m_Biases(biases) {};
virtual ~TestFullyConnectedLayerVistor() {};
void VisitFullyConnectedLayer(const IConnectableLayer* layer,
const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(fullyConnectedDescriptor);
CheckConstTensors(m_Weights, weights);
+ CheckOptionalConstTensors(m_Biases, biases);
}
protected:
@@ -158,34 +112,7 @@ protected:
private:
FullyConnectedDescriptor m_Descriptor;
ConstTensor m_Weights;
-};
-
-class TestFullyConnectedLayerWithBiasesVisitor : public TestFullyConnectedLayerVistor
-{
-public:
- explicit TestFullyConnectedLayerWithBiasesVisitor(const FullyConnectedDescriptor& descriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) :
- TestFullyConnectedLayerVistor(descriptor, weights, name),
- m_Biases(biases) {};
-
- // needed to suppress crappy error message about base class function i.e. version
- // without the biases argument being hidden
- using TestFullyConnectedLayerVistor::VisitFullyConnectedLayer;
-
- void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override
- {
- TestFullyConnectedLayerVistor::VisitFullyConnectedLayer(layer, fullyConnectedDescriptor, weights, name);
- CheckConstTensors(m_Biases, biases);
- }
-
-private:
- ConstTensor m_Biases;
+ Optional<ConstTensor> m_Biases;
};
class TestBatchNormalizationLayerVisitor : public TestLayerVisitor
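
The "WithBias" visitor subclasses removed above carried comments alluding to C++ name hiding: when a derived class overrides one overload of a base-class virtual function, the other base-class overloads become hidden unless they are re-exposed with a using-declaration. A minimal, self-contained sketch of that rule follows; the Base/Derived names are purely illustrative and not part of ArmNN.

// Illustrative only: the name-hiding rule that the removed "WithBias"
// visitor subclasses had to work around with a using-declaration.
struct Base
{
    virtual ~Base() = default;
    virtual void Visit(int input) {}
    virtual void Visit(int input, int bias) {}
};

struct Derived : Base
{
    // Without this using-declaration, Derived::Visit(int, int) hides
    // Base::Visit(int), and d.Visit(1) below would not compile.
    using Base::Visit;
    void Visit(int input, int bias) override {}
};

int main()
{
    Derived d;
    d.Visit(1);    // resolves to Base::Visit(int) thanks to the using-declaration
    d.Visit(1, 2); // resolves to Derived's override
    return 0;
}

Folding the bias into an Optional parameter on a single overload removes the need for this workaround altogether.
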
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index a960c6b772..90935f37f8 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -500,10 +500,11 @@ INetworkPtr CreateNetworkWithFullyConnectedLayer(const bool biasEnabled)
class TestFullyConnectedQuantization : public TestQuantization
{
public:
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const char* name = nullptr)
+ void VisitFullyConnectedLayer(const IConnectableLayer* layer,
+ const FullyConnectedDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -514,37 +515,17 @@ public:
// Based off current static value [-15.0f, 15.0f]
BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
- //Test constants
+ //Test weights
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
-
BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
-
BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
- }
-
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const ConstTensor& bias,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
-
- BOOST_TEST((info.GetQuantizationOffset() == 128));
-
- // Based off current static value [-15.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
- //Test constants
- BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((bias.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
-
- BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
- BOOST_CHECK_CLOSE(bias.GetInfo().GetQuantizationScale(), 30.0f/255.0f, 0.000001f);
-
- BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
+ // Test biases
+ if (biases.has_value())
+ {
+ BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), 30.0f/255.0f, 0.000001f);
+ }
}
};
@@ -570,8 +551,9 @@ class TestConv2dQuantization : public TestQuantization
{
public:
virtual void VisitConvolution2dLayer(const IConnectableLayer *layer,
- const Convolution2dDescriptor &convolution2dDescriptor,
- const ConstTensor &weights,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char *name = nullptr)
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -581,24 +563,18 @@ public:
// Based off current static value [-15.0f, 15.0f]
BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / 255.0f, 0.000001f);
- // test weights const
+ // Test weights
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
- }
- virtual void VisitConvolution2dLayer(const IConnectableLayer *layer,
- const Convolution2dDescriptor &convolution2dDescriptor,
- const ConstTensor &weights,
- const ConstTensor &biases,
- const char *name = nullptr)
- {
- VisitConvolution2dLayer(layer, convolution2dDescriptor, weights, name);
-
- // test biases const
- BOOST_TEST((biases.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_CHECK_CLOSE(biases.GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
- BOOST_TEST((biases.GetInfo().GetQuantizationOffset() == 85));
+ // Test biases
+ if (biases.has_value())
+ {
+ BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
+ BOOST_TEST((biases.value().GetInfo().GetQuantizationOffset() == 85));
+ }
}
};
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index 932aef6deb..4c028b3709 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -48,4 +48,14 @@ void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const Cons
}
}
+void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
+ const Optional<ConstTensor>& actual)
+{
+ BOOST_CHECK(expected.has_value() == actual.has_value());
+ if (expected.has_value() && actual.has_value())
+ {
+ CheckConstTensors(expected.value(), actual.value());
+ }
+}
+
} //namespace armnn
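
A standalone sketch of the presence-then-value comparison that CheckOptionalConstTensors performs above; the SameBiasPresence helper name is hypothetical, and only armnn::Optional and armnn::ConstTensor as used in this patch are assumed.

// Sketch only: mirrors the has_value()/value() pattern used by
// TestLayerVisitor::CheckOptionalConstTensors above. A real check would
// go on to compare shapes and data, as CheckConstTensors does.
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

bool SameBiasPresence(const armnn::Optional<armnn::ConstTensor>& expected,
                      const armnn::Optional<armnn::ConstTensor>& actual)
{
    if (expected.has_value() != actual.has_value())
    {
        return false; // one side carries biases, the other does not
    }
    // Both empty: trivially equal. Both populated: compare the wrapped tensors.
    return !expected.has_value()
        || expected.value().GetNumBytes() == actual.value().GetNumBytes();
}
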
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index fe2631fa39..8c0da50be3 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -21,6 +21,8 @@ protected:
void CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual);
+ void CheckOptionalConstTensors(const Optional<ConstTensor>& expected, const Optional<ConstTensor>& actual);
+
private:
const char* m_LayerName;
@@ -33,161 +35,146 @@ public:
}
}
- virtual void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) {}
-
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) {}
-
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) {}
-
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) {}
-
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) {}
-
- virtual void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
- const DetectionPostProcessDescriptor& descriptor,
- const ConstTensor& anchors,
- const char* name = nullptr) {}
-
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) {}
+ void VisitInputLayer(const IConnectableLayer* layer,
+ LayerBindingId id,
+ const char* name = nullptr) override {}
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ void VisitConvolution2dLayer(const IConnectableLayer* layer,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override {}
+
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) {}
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override {}
+
+ void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
+ const DetectionPostProcessDescriptor& descriptor,
+ const ConstTensor& anchors,
+ const char* name = nullptr) override {}
+
+ void VisitFullyConnectedLayer(const IConnectableLayer* layer,
+ const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override {}
- virtual void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) {}
+ void VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& permuteDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
- const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) {}
+ void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
+ const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) {}
+ void VisitPooling2dLayer(const IConnectableLayer* layer,
+ const Pooling2dDescriptor& pooling2dDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) {}
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& activationDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitNormalizationLayer(const IConnectableLayer* layer,
- const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) {}
+ void VisitNormalizationLayer(const IConnectableLayer* layer,
+ const NormalizationDescriptor& normalizationDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) {}
+ void VisitSoftmaxLayer(const IConnectableLayer* layer,
+ const SoftmaxDescriptor& softmaxDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitSplitterLayer(const IConnectableLayer* layer,
- const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) {}
+ void VisitSplitterLayer(const IConnectableLayer* layer,
+ const ViewsDescriptor& splitterDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) {}
+ void VisitMergerLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& mergerDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitAdditionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitAdditionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitMultiplicationLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitMultiplicationLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& desc,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) {}
+ void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
+ const BatchNormalizationDescriptor& desc,
+ const ConstTensor& mean,
+ const ConstTensor& variance,
+ const ConstTensor& beta,
+ const ConstTensor& gamma,
+ const char* name = nullptr) override {}
- virtual void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) {}
+ void VisitResizeBilinearLayer(const IConnectableLayer* layer,
+ const ResizeBilinearDescriptor& resizeDesc,
+ const char* name = nullptr) override {}
- virtual void VisitL2NormalizationLayer(const IConnectableLayer* layer,
- const L2NormalizationDescriptor& desc,
- const char* name = nullptr) {}
+ void VisitL2NormalizationLayer(const IConnectableLayer* layer,
+ const L2NormalizationDescriptor& desc,
+ const char* name = nullptr) override {}
- virtual void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr) {}
+ void VisitConstantLayer(const IConnectableLayer* layer,
+ const ConstTensor& input,
+ const char* name = nullptr) override {}
- virtual void VisitReshapeLayer(const IConnectableLayer* layer,
- const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) {}
+ void VisitReshapeLayer(const IConnectableLayer* layer,
+ const ReshapeDescriptor& reshapeDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) {}
+ void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
+ const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitFloorLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitFloorLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) {}
+ void VisitOutputLayer(const IConnectableLayer* layer,
+ LayerBindingId id,
+ const char* name = nullptr) override {}
- virtual void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) {}
+ void VisitLstmLayer(const IConnectableLayer* layer,
+ const LstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name = nullptr) override {}
- virtual void VisitDivisionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitDivisionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitSubtractionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitSubtractionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitMaximumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitMaximumLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitMeanLayer(const IConnectableLayer* layer,
- const MeanDescriptor& meanDescriptor,
- const char* name = nullptr) {}
+ void VisitMeanLayer(const IConnectableLayer* layer,
+ const MeanDescriptor& meanDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitPadLayer(const IConnectableLayer* layer,
- const PadDescriptor& padDescriptor,
- const char* name = nullptr) {}
+ void VisitPadLayer(const IConnectableLayer* layer,
+ const PadDescriptor& padDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
- const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) {}
+ void VisitStridedSliceLayer(const IConnectableLayer* layer,
+ const StridedSliceDescriptor& stridedSliceDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitMinimumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitMinimumLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitGreaterLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitGreaterLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitEqualLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitEqualLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitRsqrtLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitRsqrtLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitGatherLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitGatherLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
};
} //namespace armnn
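
For reference, a hedged sketch of constructing the populated and empty armnn::Optional<ConstTensor> values that the revised signatures above accept; the tensor shape and values are placeholders, and only TensorInfo, ConstTensor and Optional as they appear in this patch are assumed.

// Sketch only: building a present and an absent bias argument of the kind
// the single Visit*Layer overloads above now take.
#include <vector>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    using namespace armnn;

    std::vector<float>        biasData = {1.0f, 2.0f, 3.0f}; // placeholder values
    std::vector<unsigned int> biasDims = {3};
    ConstTensor bias(TensorInfo(1, biasDims.data(), DataType::Float32), biasData);

    Optional<ConstTensor> withBias(bias); // biases supplied
    Optional<ConstTensor> noBias;         // biases omitted (empty optional)

    // Visitor implementations branch on has_value() before touching the tensor.
    return (withBias.has_value() && !noBias.has_value()) ? 0 : 1;
}
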