aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjimfly01 <jim.flynn@arm.com>2019-02-05 09:20:11 +0000
committerJim Flynn <jim.flynn@arm.com>2019-02-07 16:00:12 +0000
commitaebf2e7871997378d33fa99f7cab6322be9b726f (patch)
treeaef3f6fd05aafbee10100de85ec2e766d4f92ef7
parent857aa45407df9dbe99a11d03a4be2b20bd0110ae (diff)
downloadarmnn-aebf2e7871997378d33fa99f7cab6322be9b726f.tar.gz
IVGCVSW-2553 Add Unit Test for ConstTensor layer visitors
Change-Id: I20a67135e4af7aa1f28f5000d73122f4e5e3acd5
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
-rw-r--r--CMakeLists.txt2
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp3
-rw-r--r--src/armnn/layers/ConstantLayer.cpp2
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp4
-rw-r--r--src/armnn/layers/LstmLayer.cpp34
-rw-r--r--src/armnn/test/ConstTensorLayerVisitor.cpp1182
-rw-r--r--src/armnn/test/ConstTensorLayerVisitor.hpp280
-rw-r--r--src/armnn/test/TestLayerVisitor.cpp18
-rw-r--r--src/armnn/test/TestLayerVisitor.hpp3
11 files changed, 1511 insertions, 25 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9eb184f4d1..8d43ef8d8b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -407,6 +407,8 @@ endif()
if(BUILD_UNIT_TESTS)
set(unittest_sources)
list(APPEND unittest_sources
+ src/armnn/test/ConstTensorLayerVisitor.hpp
+ src/armnn/test/ConstTensorLayerVisitor.cpp
src/armnn/test/CreateWorkload.hpp
src/armnn/test/CsvReaderTest.cpp
src/armnn/test/EndToEndTest.cpp
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 2212f47d3c..725dbd88b2 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -75,7 +75,8 @@ void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
ConstTensor varianceTensor(m_Variance->GetTensorInfo(), m_Variance->Map(true));
ConstTensor betaTensor(m_Beta->GetTensorInfo(), m_Beta->Map(true));
ConstTensor gammaTensor(m_Gamma->GetTensorInfo(), m_Gamma->Map(true));
- visitor.VisitBatchNormalizationLayer(this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor);
+ visitor.VisitBatchNormalizationLayer(
+ this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
}
} // namespace armnn
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 919fd61087..31e1549e0e 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -53,7 +53,7 @@ void ConstantLayer::ValidateTensorShapesFromInputs()
void ConstantLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor layerOutputTensor(m_LayerOutput->GetTensorInfo(), m_LayerOutput->GetTensor<void*>()) ;
+ ConstTensor layerOutputTensor(m_LayerOutput->GetTensorInfo(), m_LayerOutput->Map(true)) ;
visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
}
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index cb90f81e23..2c0997a9d0 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -112,10 +112,10 @@ Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->GetTensor<void*>()) ;
- ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void*>());
+ ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)) ;
if (GetParameters().m_BiasEnabled)
{
+ ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, biasTensor, GetName());
}
else
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index dca13f263c..a17673fc1e 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -122,10 +122,10 @@ Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->GetTensor<void*>()) ;
- ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void*>());
+ ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)) ;
if (GetParameters().m_BiasEnabled)
{
+ ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, biasTensor, GetName());
}
else
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 783482e013..219113bca6 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -88,10 +88,10 @@ Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->GetTensor<void*>()) ;
- ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void*>());
+ ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)) ;
if (GetParameters().m_BiasEnabled)
{
+ ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, biasTensor, GetName());
}
else
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 06140c924f..fa836d0317 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -255,107 +255,107 @@ void LstmLayer::Accept(ILayerVisitor& visitor) const
if (m_CifgParameters.m_InputToInputWeights != nullptr)
{
ConstTensor inputToInputWeightsTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(),
- m_CifgParameters.m_InputToInputWeights->GetConstTensor<void*>());
+ m_CifgParameters.m_InputToInputWeights->Map(true));
inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
}
if (m_BasicParameters.m_InputToForgetWeights != nullptr)
{
ConstTensor inputToForgetWeightsTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
- m_BasicParameters.m_InputToForgetWeights->GetConstTensor<void*>());
+ m_BasicParameters.m_InputToForgetWeights->Map(true));
inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
}
if (m_BasicParameters.m_InputToCellWeights != nullptr)
{
ConstTensor inputToCellWeightsTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
- m_BasicParameters.m_InputToCellWeights->GetConstTensor<void*>());
+ m_BasicParameters.m_InputToCellWeights->Map(true));
inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
}
if (m_BasicParameters.m_InputToOutputWeights != nullptr)
{
ConstTensor inputToOutputWeightsTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
- m_BasicParameters.m_InputToOutputWeights->GetConstTensor<void*>());
+ m_BasicParameters.m_InputToOutputWeights->Map(true));
inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
}
if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
{
ConstTensor recurrentToInputWeightsTensor(
m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(),
- m_CifgParameters.m_RecurrentToInputWeights->GetConstTensor<void*>());
+ m_CifgParameters.m_RecurrentToInputWeights->Map(true));
inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
}
if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
{
ConstTensor recurrentToForgetWeightsTensor(
m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
- m_BasicParameters.m_RecurrentToForgetWeights->GetConstTensor<void*>());
+ m_BasicParameters.m_RecurrentToForgetWeights->Map(true));
inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
}
if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
{
ConstTensor recurrentToCellWeightsTensor(
m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
- m_BasicParameters.m_RecurrentToCellWeights->GetConstTensor<void*>());
+ m_BasicParameters.m_RecurrentToCellWeights->Map(true));
inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
}
if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
{
ConstTensor recurrentToOutputWeightsTensor(
m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
- m_BasicParameters.m_RecurrentToOutputWeights->GetConstTensor<void*>());
+ m_BasicParameters.m_RecurrentToOutputWeights->Map(true));
inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
}
if (m_CifgParameters.m_CellToInputWeights != nullptr)
{
ConstTensor cellToInputWeightsTensor(m_CifgParameters.m_CellToInputWeights->GetTensorInfo(),
- m_CifgParameters.m_CellToInputWeights->GetConstTensor<void*>());
+ m_CifgParameters.m_CellToInputWeights->Map(true));
inputParams.m_CellToInputWeights = &cellToInputWeightsTensor;
}
if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
{
ConstTensor cellToForgetWeightsTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(),
- m_PeepholeParameters.m_CellToForgetWeights->GetConstTensor<void*>());
+ m_PeepholeParameters.m_CellToForgetWeights->Map(true));
inputParams.m_CellToForgetWeights = &cellToForgetWeightsTensor;
}
if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
{
ConstTensor cellToOutputWeightsTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(),
- m_PeepholeParameters.m_CellToOutputWeights->GetConstTensor<void*>());
+ m_PeepholeParameters.m_CellToOutputWeights->Map(true));
inputParams.m_CellToOutputWeights = &cellToOutputWeightsTensor;
}
if (m_CifgParameters.m_InputGateBias != nullptr)
{
ConstTensor inputGateBiasTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(),
- m_CifgParameters.m_InputGateBias->GetConstTensor<void*>());
+ m_CifgParameters.m_InputGateBias->Map(true));
inputParams.m_InputGateBias = &inputGateBiasTensor;
}
if (m_BasicParameters.m_ForgetGateBias != nullptr)
{
ConstTensor forgetGateBiasTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
- m_BasicParameters.m_ForgetGateBias->GetConstTensor<void*>());
+ m_BasicParameters.m_ForgetGateBias->Map(true));
inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
}
if (m_BasicParameters.m_CellBias != nullptr)
{
ConstTensor cellBiasTensor(m_BasicParameters.m_CellBias->GetTensorInfo(),
- m_BasicParameters.m_CellBias->GetConstTensor<void*>());
+ m_BasicParameters.m_CellBias->Map(true));
inputParams.m_CellBias = &cellBiasTensor;
}
if (m_BasicParameters.m_OutputGateBias != nullptr)
{
ConstTensor outputGateBias(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
- m_BasicParameters.m_OutputGateBias->GetConstTensor<void*>());
+ m_BasicParameters.m_OutputGateBias->Map(true));
inputParams.m_OutputGateBias = &outputGateBias;
}
if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
{
ConstTensor projectionWeightsTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(),
- m_ProjectionParameters.m_ProjectionWeights->GetConstTensor<void*>());
+ m_ProjectionParameters.m_ProjectionWeights->Map(true));
inputParams.m_ProjectionWeights = &projectionWeightsTensor;
}
if (m_ProjectionParameters.m_ProjectionBias != nullptr)
{
ConstTensor projectionBiasTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(),
- m_ProjectionParameters.m_ProjectionBias->GetConstTensor<void*>());
+ m_ProjectionParameters.m_ProjectionBias->Map(true));
inputParams.m_ProjectionBias = &projectionBiasTensor;
}
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
new file mode 100644
index 0000000000..c34152fee5
--- /dev/null
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -0,0 +1,1182 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConstTensorLayerVisitor.hpp"
+#include "Network.hpp"
+
+#include <boost/test/unit_test.hpp>
+
+namespace armnn
+{
+
+void TestConvolution2dLayerVisitor::CheckDescriptor(const Convolution2dDescriptor &convolution2dDescriptor)
+{
+ BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
+ BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
+ BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
+ BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
+ BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
+ BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
+ BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
+ BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
+}
+
+void TestDepthwiseConvolution2dLayerVisitor::CheckDescriptor(
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor)
+{
+ BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
+ BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
+ BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
+ BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
+ BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
+ BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
+ BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
+ BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
+}
+
+void TestFullyConnectedLayerVistor::CheckDescriptor(const FullyConnectedDescriptor& descriptor)
+{
+ BOOST_CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
+ BOOST_CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
+}
+
+void TestBatchNormalizationLayerVisitor::CheckDescriptor(const BatchNormalizationDescriptor& descriptor)
+{
+ BOOST_CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
+ BOOST_CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
+}
+
+void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
+{
+ BOOST_CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
+ BOOST_CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
+ BOOST_CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
+ BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
+ BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
+ BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
+}
+
+void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const ConstTensor* actual)
+{
+ if (expected == nullptr)
+ {
+ BOOST_TEST(actual == nullptr, name + " actual should have been a nullptr");
+ }
+ else
+ {
+ BOOST_TEST(actual != nullptr, name + " actual should have been set");
+ if (actual != nullptr)
+ {
+ CheckConstTensors(*expected, *actual);
+ }
+ }
+}
+
+void TestLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
+{
+ CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
+ CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
+ CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
+ CheckConstTensorPtrs("InputToInputWeights",
+ m_InputParams.m_InputToInputWeights, inputParams.m_InputToInputWeights);
+ CheckConstTensorPtrs("InputToForgetWeights",
+ m_InputParams.m_InputToForgetWeights, inputParams.m_InputToForgetWeights);
+ CheckConstTensorPtrs("InputToCellWeights", m_InputParams.m_InputToCellWeights, inputParams.m_InputToCellWeights);
+ CheckConstTensorPtrs(
+ "InputToOutputWeights", m_InputParams.m_InputToOutputWeights, inputParams.m_InputToOutputWeights);
+ CheckConstTensorPtrs(
+ "RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, inputParams.m_RecurrentToInputWeights);
+ CheckConstTensorPtrs(
+ "RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, inputParams.m_RecurrentToForgetWeights);
+ CheckConstTensorPtrs(
+ "RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, inputParams.m_RecurrentToCellWeights);
+ CheckConstTensorPtrs(
+ "RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, inputParams.m_RecurrentToOutputWeights);
+ CheckConstTensorPtrs(
+ "CellToInputWeights", m_InputParams.m_CellToInputWeights, inputParams.m_CellToInputWeights);
+ CheckConstTensorPtrs(
+ "CellToForgetWeights", m_InputParams.m_CellToForgetWeights, inputParams.m_CellToForgetWeights);
+ CheckConstTensorPtrs(
+ "CellToOutputWeights", m_InputParams.m_CellToOutputWeights, inputParams.m_CellToOutputWeights);
+ CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
+ CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
+ CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
+}
+
+BOOST_AUTO_TEST_SUITE(TestConstTensorLayerVisitor)
+
+BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
+{
+ Convolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestConvolution2dLayerVisitor visitor(descriptor, weights);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
+{
+ const char* layerName = "Convolution2dLayer";
+ Convolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
+{
+ Convolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+ descriptor.m_BiasEnabled = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+
+
+ TestConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
+{
+ const char* layerName = "Convolution2dLayer";
+ Convolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+ descriptor.m_BiasEnabled = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+
+ TestConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
+{
+ DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
+{
+ const char* layerName = "DepthwiseConvolution2dLayer";
+ DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
+{
+ DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+ descriptor.m_BiasEnabled = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+
+ TestDepthwiseConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
+{
+ const char* layerName = "DepthwiseConvolution2dLayer";
+ DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 2;
+ descriptor.m_PadRight = 3;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadTop = 5;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 3;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+ descriptor.m_BiasEnabled = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+
+ TestDepthwiseConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
+{
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_TransposeWeightMatrix = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestFullyConnectedLayerVistor visitor(descriptor, weights);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
+{
+ const char* layerName = "FullyConnectedLayer";
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_TransposeWeightMatrix = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
+{
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_BiasEnabled = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+
+ TestFullyConnectedLayerWithBiasesVisitor visitor(descriptor, weights, biases);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
+{
+ const char* layerName = "FullyConnectedLayer";
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_BiasEnabled = true;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+
+ TestFullyConnectedLayerWithBiasesVisitor visitor(descriptor, weights, biases, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
+{
+ BatchNormalizationDescriptor descriptor;
+ descriptor.m_Eps = 0.0002f;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor mean(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor variance(TensorInfo(4, varianceDimensions.data(), armnn::DataType::Float32), varianceData);
+
+ std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor beta(TensorInfo(4, betaDimensions.data(), armnn::DataType::Float32), betaData);
+
+ std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), armnn::DataType::Float32), gammaData);
+
+ TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
+{
+ const char* layerName = "BatchNormalizationLayer";
+ BatchNormalizationDescriptor descriptor;
+ descriptor.m_Eps = 0.0002f;
+ descriptor.m_DataLayout = DataLayout::NHWC;
+
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor mean(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor variance(TensorInfo(4, varianceDimensions.data(), armnn::DataType::Float32), varianceData);
+
+ std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor beta(TensorInfo(4, betaDimensions.data(), armnn::DataType::Float32), betaData);
+
+ std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), armnn::DataType::Float32), gammaData);
+
+ TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
+ descriptor, mean, variance, beta, gamma, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckConstLayer)
+{
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor input(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestConstantLayerVisitor visitor(input);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddConstantLayer(input);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
+{
+ const char* layerName = "ConstantLayer";
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ armnn::ConstTensor input(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+
+ TestConstantLayerVisitor visitor(input, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
+ layer->Accept(visitor);
+}
+
+// Checks that AddLstmLayer forwards the descriptor and the nine mandatory LSTM tensors to the visitor.
+BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
+{
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = true; // CIFG enabled: the optional CIFG params (input gate tensors) are not required
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ TestLstmLayerVisitor visitor(descriptor, params);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
+ layer->Accept(visitor);
+}
+
+// As CheckLstmLayerBasic, but also verifies the layer name is forwarded to the visitor.
+BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
+{
+ const char* layerName = "LstmLayer";
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = true; // CIFG enabled: the optional CIFG params (input gate tensors) are not required
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ TestLstmLayerVisitor visitor(descriptor, params, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
+ layer->Accept(visitor);
+}
+
+// Checks that with CIFG disabled the optional input-gate tensors are also forwarded to the visitor.
+BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
+{
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = false; // CIFG disabled: the optional CIFG params (input gate tensors) MUST be set below
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToInputWeights(
+ TensorInfo(4, inputToInputWeightsDimensions.data(), armnn::DataType::Float32), inputToInputWeightsData);
+
+ std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToInputWeights(TensorInfo(
+ 4, recurrentToInputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToInputWeightsData);
+
+ std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellToInputWeights(
+ TensorInfo(4, cellToInputWeightsDimensions.data(), armnn::DataType::Float32), cellToInputWeightsData);
+
+ std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputGateBias(
+ TensorInfo(4, inputGateBiasDimensions.data(), armnn::DataType::Float32), inputGateBiasData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ params.m_InputToInputWeights = &inputToInputWeights;
+ params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+ params.m_CellToInputWeights = &cellToInputWeights;
+ params.m_InputGateBias = &inputGateBias;
+
+ TestLstmLayerVisitor visitor(descriptor, params);
+
+ armnn::Network net;
+
+ IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
+ layer->Accept(visitor);
+}
+
+// As CheckLstmLayerCifgDisabled, but also verifies the layer name is forwarded to the visitor.
+BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
+{
+ const char* layerName = "LstmLayer";
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = false; // CIFG disabled: the optional CIFG params (input gate tensors) MUST be set below
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToInputWeights(
+ TensorInfo(4, inputToInputWeightsDimensions.data(), armnn::DataType::Float32), inputToInputWeightsData);
+
+ std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToInputWeights(TensorInfo(
+ 4, recurrentToInputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToInputWeightsData);
+
+ std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellToInputWeights(
+ TensorInfo(4, cellToInputWeightsDimensions.data(), armnn::DataType::Float32), cellToInputWeightsData);
+
+ std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputGateBias(
+ TensorInfo(4, inputGateBiasDimensions.data(), armnn::DataType::Float32), inputGateBiasData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ params.m_InputToInputWeights = &inputToInputWeights;
+ params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+ params.m_CellToInputWeights = &cellToInputWeights;
+ params.m_InputGateBias = &inputGateBias;
+
+ TestLstmLayerVisitor visitor(descriptor, params, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
+ layer->Accept(visitor);
+}
+
+// Peephole connections enabled: checks that the cell-to-forget/cell-to-output weights reach the visitor.
+BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
+{
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = true; // CIFG enabled: the optional CIFG params (input gate tensors) are not required
+ descriptor.m_PeepholeEnabled = true;
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellToForgetWeights(
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), armnn::DataType::Float32), cellToForgetWeightsData);
+
+ std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellToOutputWeights(
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), armnn::DataType::Float32), cellToOutputWeightsData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ params.m_CellToForgetWeights = &cellToForgetWeights;
+ params.m_CellToOutputWeights = &cellToOutputWeights;
+
+ TestLstmLayerVisitor visitor(descriptor, params);
+
+ armnn::Network net;
+
+ IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
+ layer->Accept(visitor);
+}
+
+// As CheckLstmLayerPeephole, but also verifies the layer name is forwarded to the visitor.
+BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
+{
+ const char* layerName = "LstmLayer";
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = true; // CIFG enabled: the optional CIFG params (input gate tensors) are not required
+ descriptor.m_PeepholeEnabled = true;
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellToForgetWeights(
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), armnn::DataType::Float32), cellToForgetWeightsData);
+
+ std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellToOutputWeights(
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), armnn::DataType::Float32), cellToOutputWeightsData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ params.m_CellToForgetWeights = &cellToForgetWeights;
+ params.m_CellToOutputWeights = &cellToOutputWeights;
+
+ TestLstmLayerVisitor visitor(descriptor, params, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
+ layer->Accept(visitor);
+}
+
+// Projection enabled: checks that the projection weights and bias reach the visitor.
+BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
+{
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = true; // CIFG enabled: the optional CIFG params (input gate tensors) are not required
+ descriptor.m_ProjectionEnabled = true;
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), armnn::DataType::Float32), projectionBiasData);
+
+ std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), armnn::DataType::Float32), projectionWeightsData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ params.m_ProjectionWeights = &projectionWeights;
+ params.m_ProjectionBias = &projectionBias;
+
+ TestLstmLayerVisitor visitor(descriptor, params);
+
+ armnn::Network net;
+
+ IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
+{
+ const char* layerName = "LstmLayer";
+ LstmDescriptor descriptor;
+ descriptor.m_ActivationFunc = 3;
+ descriptor.m_ClippingThresProj = 0.5f;
+ descriptor.m_ClippingThresCell = 0.3f;
+ descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
+ descriptor.m_ProjectionEnabled = true;
+
+ std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+
+ std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+
+ std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+
+ std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+
+ std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+
+ std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+
+ std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+
+ std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+
+ std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+
+ std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), armnn::DataType::Float32), projectionBiasData);
+
+ std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+ std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
+ armnn::ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), armnn::DataType::Float32), projectionWeightsData);
+
+ LstmInputParams params;
+ params.m_InputToForgetWeights = &inputToForgetWeights;
+ params.m_InputToCellWeights = &inputToCellWeights;
+ params.m_InputToOutputWeights = &inputToOutputWeights;
+ params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+ params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+ params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+ params.m_ForgetGateBias = &forgetGateBias;
+ params.m_CellBias = &cellBias;
+ params.m_OutputGateBias = &outputGateBias;
+
+ params.m_ProjectionWeights = &projectionWeights;
+ params.m_ProjectionBias = &projectionBias;
+
+ TestLstmLayerVisitor visitor(descriptor, params, layerName);
+
+ armnn::Network net;
+
+ IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
+ layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace armnn
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
new file mode 100644
index 0000000000..3b0f723542
--- /dev/null
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -0,0 +1,280 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "TestLayerVisitor.hpp"
+#include <armnn/Descriptors.hpp>
+#include <armnn/LstmParams.hpp>
+
+namespace armnn
+{
+
+class TestConvolution2dLayerVisitor : public TestLayerVisitor
+{
+public:
+ explicit TestConvolution2dLayerVisitor(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr) : TestLayerVisitor(name),
+ m_Descriptor(convolution2dDescriptor),
+ m_Weights(weights) {};
+
+ virtual ~TestConvolution2dLayerVisitor() {};
+
+ void VisitConvolution2dLayer(const IConnectableLayer* layer,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr) override
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(convolution2dDescriptor);
+ CheckConstTensors(m_Weights, weights);
+ }
+
+protected:
+ void CheckDescriptor(const Convolution2dDescriptor& convolution2dDescriptor);
+
+private:
+ armnn::Convolution2dDescriptor m_Descriptor;
+ armnn::ConstTensor m_Weights;
+};
+
+class TestConvolution2dWithBiasLayerVisitor : public TestConvolution2dLayerVisitor
+{
+public:
+ explicit TestConvolution2dWithBiasLayerVisitor(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr) :
+ TestConvolution2dLayerVisitor(
+ convolution2dDescriptor, weights, name),
+ m_Biases(biases) {};
+
+    // Bring the base-class VisitConvolution2dLayer overload (the version without
+    // the biases argument) into scope so it is not hidden by the override below.
+ using TestConvolution2dLayerVisitor::VisitConvolution2dLayer;
+
+ void VisitConvolution2dLayer(const IConnectableLayer* layer,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr) override
+ {
+ TestConvolution2dLayerVisitor::VisitConvolution2dLayer(layer, convolution2dDescriptor, weights, name);
+ CheckConstTensors(m_Biases, biases);
+ }
+
+private:
+ armnn::ConstTensor m_Biases;
+};
+
+class TestDepthwiseConvolution2dLayerVisitor : public TestLayerVisitor
+{
+public:
+ explicit TestDepthwiseConvolution2dLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr) : TestLayerVisitor(name),
+ m_Descriptor(descriptor),
+ m_Weights(weights) {};
+
+ virtual ~TestDepthwiseConvolution2dLayerVisitor() {};
+
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr) override
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(convolution2dDescriptor);
+ CheckConstTensors(m_Weights, weights);
+ }
+
+protected:
+ void CheckDescriptor(const DepthwiseConvolution2dDescriptor& convolution2dDescriptor);
+
+private:
+ armnn::DepthwiseConvolution2dDescriptor m_Descriptor;
+ armnn::ConstTensor m_Weights;
+};
+
+class TestDepthwiseConvolution2dWithBiasLayerVisitor : public TestDepthwiseConvolution2dLayerVisitor
+{
+public:
+ explicit TestDepthwiseConvolution2dWithBiasLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr) :
+ TestDepthwiseConvolution2dLayerVisitor(descriptor, weights, name),
+ m_Biases(biases) {};
+
+ ~TestDepthwiseConvolution2dWithBiasLayerVisitor() {};
+
+    // Bring the base-class VisitDepthwiseConvolution2dLayer overload (the version
+    // without the biases argument) into scope so it is not hidden by the override below.
+ using TestDepthwiseConvolution2dLayerVisitor::VisitDepthwiseConvolution2dLayer;
+
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr) override
+ {
+ TestDepthwiseConvolution2dLayerVisitor::VisitDepthwiseConvolution2dLayer(
+ layer, convolution2dDescriptor, weights, name);
+ CheckConstTensors(m_Biases, biases);
+ }
+
+private:
+ armnn::ConstTensor m_Biases;
+};
+
+class TestFullyConnectedLayerVistor : public TestLayerVisitor
+{
+public:
+ explicit TestFullyConnectedLayerVistor(const FullyConnectedDescriptor& descriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr) : TestLayerVisitor(name),
+ m_Descriptor(descriptor),
+ m_Weights(weights) {};
+
+ virtual ~TestFullyConnectedLayerVistor() {};
+
+ void VisitFullyConnectedLayer(const IConnectableLayer* layer,
+ const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr) override
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(fullyConnectedDescriptor);
+ CheckConstTensors(m_Weights, weights);
+ }
+
+protected:
+ void CheckDescriptor(const FullyConnectedDescriptor& descriptor);
+private:
+ FullyConnectedDescriptor m_Descriptor;
+ ConstTensor m_Weights;
+};
+
+class TestFullyConnectedLayerWithBiasesVisitor : public TestFullyConnectedLayerVistor
+{
+public:
+ explicit TestFullyConnectedLayerWithBiasesVisitor(const FullyConnectedDescriptor& descriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr) :
+ TestFullyConnectedLayerVistor(descriptor, weights, name),
+ m_Biases(biases) {};
+
+    // Bring the base-class VisitFullyConnectedLayer overload (the version without
+    // the biases argument) into scope so it is not hidden by the override below.
+ using TestFullyConnectedLayerVistor::VisitFullyConnectedLayer;
+
+ void VisitFullyConnectedLayer(const IConnectableLayer* layer,
+ const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr) override
+ {
+ TestFullyConnectedLayerVistor::VisitFullyConnectedLayer(layer, fullyConnectedDescriptor, weights, name);
+ CheckConstTensors(m_Biases, biases);
+ }
+
+private:
+ ConstTensor m_Biases;
+};
+
+class TestBatchNormalizationLayerVisitor : public TestLayerVisitor
+{
+public:
+ TestBatchNormalizationLayerVisitor(const BatchNormalizationDescriptor& descriptor,
+ const ConstTensor& mean,
+ const ConstTensor& variance,
+ const ConstTensor& beta,
+ const ConstTensor& gamma,
+ const char* name = nullptr) : TestLayerVisitor(name),
+ m_Descriptor(descriptor),
+ m_Mean(mean),
+ m_Variance(variance),
+ m_Beta(beta),
+ m_Gamma(gamma) {};
+ void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
+ const BatchNormalizationDescriptor& descriptor,
+ const ConstTensor& mean,
+ const ConstTensor& variance,
+ const ConstTensor& beta,
+ const ConstTensor& gamma,
+ const char* name = nullptr) override
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(descriptor);
+ CheckConstTensors(m_Mean, mean);
+ CheckConstTensors(m_Variance, variance);
+ CheckConstTensors(m_Beta, beta);
+ CheckConstTensors(m_Gamma, gamma);
+ }
+
+protected:
+ void CheckDescriptor(const BatchNormalizationDescriptor& descriptor);
+private:
+ BatchNormalizationDescriptor m_Descriptor;
+ ConstTensor m_Mean;
+ ConstTensor m_Variance;
+ ConstTensor m_Beta;
+ ConstTensor m_Gamma;
+};
+
+class TestConstantLayerVisitor : public TestLayerVisitor
+{
+public:
+ explicit TestConstantLayerVisitor(const ConstTensor& input, const char* name = nullptr) :
+ TestLayerVisitor(name), m_Input(input) {};
+
+ void VisitConstantLayer(const IConnectableLayer* layer,
+ const ConstTensor& input,
+                            const char* name = nullptr) override
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckConstTensors(m_Input, input);
+ }
+
+private:
+ ConstTensor m_Input;
+};
+
+class TestLstmLayerVisitor : public TestLayerVisitor
+{
+public:
+ explicit TestLstmLayerVisitor(const LstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name = nullptr) :
+ TestLayerVisitor(name), m_Descriptor(descriptor), m_InputParams(params) {};
+
+ void VisitLstmLayer(const IConnectableLayer* layer,
+ const LstmDescriptor& descriptor,
+ const LstmInputParams& params,
+                        const char* name = nullptr) override
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(descriptor);
+ CheckInputParameters(params);
+ }
+
+protected:
+ void CheckDescriptor(const LstmDescriptor& descriptor);
+ void CheckInputParameters(const LstmInputParams& inputParams);
+ void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
+
+private:
+ LstmDescriptor m_Descriptor;
+ LstmInputParams m_InputParams;
+};
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index 5cb7bdfca5..2584179bb7 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -30,4 +30,22 @@ void TestLayerVisitor::CheckLayerPointer(const IConnectableLayer* layer)
BOOST_CHECK(layer != nullptr);
};
+void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual)
+{
+ BOOST_CHECK(expected.GetInfo() == actual.GetInfo());
+ BOOST_CHECK(expected.GetNumDimensions() == actual.GetNumDimensions());
+ BOOST_CHECK(expected.GetNumElements() == actual.GetNumElements());
+ BOOST_CHECK(expected.GetNumBytes() == actual.GetNumBytes());
+ if (expected.GetNumBytes() == actual.GetNumBytes())
+ {
+ //check data is the same byte by byte
+ const unsigned char* expectedPtr = static_cast<const unsigned char*>(expected.GetMemoryArea());
+ const unsigned char* actualPtr = static_cast<const unsigned char*>(actual.GetMemoryArea());
+ for (unsigned int i = 0; i < expected.GetNumBytes(); i++)
+ {
+ BOOST_CHECK(*(expectedPtr + i) == *(actualPtr + i));
+ }
+ }
+}
+
} //namespace armnn \ No newline at end of file
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index 1998fb9b3b..5775df0e61 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -5,6 +5,7 @@
#pragma once
#include <armnn/ILayerVisitor.hpp>
+#include <armnn/Descriptors.hpp>
namespace armnn
{
@@ -18,6 +19,8 @@ protected:
void CheckLayerPointer(const IConnectableLayer* layer);
+ void CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual);
+
private:
const char* m_LayerName;