author      Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-10-15 17:35:36 +0100
committer   Áron Virginás-Tar <aron.virginas-tar@arm.com>    2019-10-16 09:39:56 +0000
commit      6fe5247f8997a04edfdd7c974c96a0a086ef3ab5 (patch)
tree        52d6cc314797f7bf138a0b2d81491543e05b6900
parent      20bea0071d507772e303eb6f1c476bf1feac9be5 (diff)
download    armnn-6fe5247f8997a04edfdd7c974c96a0a086ef3ab5.tar.gz
IVGCVSW-3991 Make Descriptor objects comparable and refactor LayerVisitor tests
* Implemented operator==() for Descriptor structs
* Refactored TestNameAndDescriptorLayerVisitor to eliminate code duplication by using templates and taking advantage of the fact that descriptor objects can now all be compared the same way using ==
* Cleaned up TestNameOnlyLayerVisitor by moving all test cases for layers that require a descriptor to TestNameAndDescriptorLayerVisitor

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Iee38b04d68d34a5f4ec7e5790de39ecb7ab0fb80
-rw-r--r--   include/armnn/Descriptors.hpp                          435
-rw-r--r--   src/armnn/Descriptors.cpp                               44
-rw-r--r--   src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp   554
-rw-r--r--   src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp   784
-rw-r--r--   src/armnn/test/TestNameOnlyLayerVisitor.cpp             54
-rw-r--r--   src/armnn/test/TestNameOnlyLayerVisitor.hpp             55
6 files changed, 641 insertions, 1285 deletions
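
The commit message above summarises the approach: every descriptor struct gains an operator==(), so the visitor tests can compare a visited descriptor against an expected one in a single expression instead of checking each field. The following is a minimal sketch (editor's illustration, not part of this patch) of what that enables, assuming the Descriptors.hpp introduced below is on the include path:

// Editor's illustration only: exercising the operator==() added by this patch.
#include <armnn/Descriptors.hpp>
#include <cassert>

int main()
{
    armnn::ActivationDescriptor expected;
    expected.m_Function = armnn::ActivationFunction::Linear;
    expected.m_A = 2.0f;
    expected.m_B = 2.0f;

    armnn::ActivationDescriptor visited = expected;
    assert(visited == expected);    // all fields match, so the descriptors compare equal

    visited.m_B = 3.0f;
    assert(!(visited == expected)); // a single differing field breaks equality

    return 0;
}
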
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index e2e59741a3..92e842b2c1 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -19,7 +19,16 @@ namespace armnn
/// An ActivationDescriptor for the ActivationLayer.
struct ActivationDescriptor
{
- ActivationDescriptor() : m_Function(ActivationFunction::Sigmoid), m_A(0), m_B(0) {}
+ ActivationDescriptor()
+ : m_Function(ActivationFunction::Sigmoid)
+ , m_A(0)
+ , m_B(0)
+ {}
+
+ bool operator ==(const ActivationDescriptor &rhs) const
+ {
+ return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
+ }
/// @brief The activation function to use
/// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
@@ -34,10 +43,15 @@ struct ActivationDescriptor
struct ArgMinMaxDescriptor
{
ArgMinMaxDescriptor()
- : m_Function(ArgMinMaxFunction::Min)
- , m_Axis(-1)
+ : m_Function(ArgMinMaxFunction::Min)
+ , m_Axis(-1)
{}
+ bool operator ==(const ArgMinMaxDescriptor &rhs) const
+ {
+ return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis;
+ }
+
/// Specify if the function is to find Min or Max.
ArgMinMaxFunction m_Function;
/// Axis to reduce across the input tensor.
@@ -49,12 +63,17 @@ struct PermuteDescriptor
{
PermuteDescriptor()
: m_DimMappings{}
- {
- }
+ {}
+
PermuteDescriptor(const PermutationVector& dimMappings)
: m_DimMappings(dimMappings)
+ {}
+
+ bool operator ==(const PermuteDescriptor &rhs) const
{
+ return m_DimMappings.IsEqual(rhs.m_DimMappings);
}
+
/// @brief Indicates how to translate tensor elements from a given source into the target destination, when
/// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
PermutationVector m_DimMappings;
@@ -64,10 +83,15 @@ struct PermuteDescriptor
struct SoftmaxDescriptor
{
SoftmaxDescriptor()
- : m_Beta(1.0f)
- , m_Axis(-1)
+ : m_Beta(1.0f)
+ , m_Axis(-1)
{}
+ bool operator ==(const SoftmaxDescriptor& rhs) const
+ {
+ return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
+ }
+
/// Exponentiation value.
float m_Beta;
/// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
@@ -91,6 +115,8 @@ struct OriginsDescriptor
OriginsDescriptor& operator=(OriginsDescriptor rhs);
+ bool operator ==(const OriginsDescriptor& rhs) const;
+
/// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
/// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
/// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
@@ -131,6 +157,9 @@ struct ViewsDescriptor
~ViewsDescriptor();
ViewsDescriptor& operator=(ViewsDescriptor rhs);
+
+ bool operator ==(const ViewsDescriptor& rhs) const;
+
/// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
/// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
/// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
@@ -244,20 +273,36 @@ OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
struct Pooling2dDescriptor
{
Pooling2dDescriptor()
- : m_PoolType(PoolingAlgorithm::Max)
- , m_PadLeft(0)
- , m_PadRight(0)
- , m_PadTop(0)
- , m_PadBottom(0)
- , m_PoolWidth(0)
- , m_PoolHeight(0)
- , m_StrideX(0)
- , m_StrideY(0)
- , m_OutputShapeRounding(OutputShapeRounding::Floor)
- , m_PaddingMethod(PaddingMethod::Exclude)
- , m_DataLayout(DataLayout::NCHW)
+ : m_PoolType(PoolingAlgorithm::Max)
+ , m_PadLeft(0)
+ , m_PadRight(0)
+ , m_PadTop(0)
+ , m_PadBottom(0)
+ , m_PoolWidth(0)
+ , m_PoolHeight(0)
+ , m_StrideX(0)
+ , m_StrideY(0)
+ , m_OutputShapeRounding(OutputShapeRounding::Floor)
+ , m_PaddingMethod(PaddingMethod::Exclude)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const Pooling2dDescriptor& rhs) const
+ {
+ return m_PoolType == rhs.m_PoolType &&
+ m_PadLeft == rhs.m_PadLeft &&
+ m_PadRight == rhs.m_PadRight &&
+ m_PadTop == rhs.m_PadTop &&
+ m_PadBottom == rhs.m_PadBottom &&
+ m_PoolWidth == rhs.m_PoolWidth &&
+ m_PoolHeight == rhs.m_PoolHeight &&
+ m_StrideX == rhs.m_StrideX &&
+ m_StrideY == rhs.m_StrideY &&
+ m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
+ m_PaddingMethod == rhs.m_PaddingMethod &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// The pooling algorithm to use (Max, Average, L2).
PoolingAlgorithm m_PoolType;
/// Padding left value in the width dimension.
@@ -288,10 +333,15 @@ struct Pooling2dDescriptor
struct FullyConnectedDescriptor
{
FullyConnectedDescriptor()
- : m_BiasEnabled(false)
- , m_TransposeWeightMatrix(false)
+ : m_BiasEnabled(false)
+ , m_TransposeWeightMatrix(false)
{}
+ bool operator ==(const FullyConnectedDescriptor& rhs) const
+ {
+ return m_BiasEnabled == rhs.m_BiasEnabled && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix;
+ }
+
/// Enable/disable bias.
bool m_BiasEnabled;
/// Enable/disable transpose weight matrix.
@@ -302,18 +352,32 @@ struct FullyConnectedDescriptor
struct Convolution2dDescriptor
{
Convolution2dDescriptor()
- : m_PadLeft(0)
- , m_PadRight(0)
- , m_PadTop(0)
- , m_PadBottom(0)
- , m_StrideX(0)
- , m_StrideY(0)
- , m_DilationX(1)
- , m_DilationY(1)
- , m_BiasEnabled(false)
- , m_DataLayout(DataLayout::NCHW)
+ : m_PadLeft(0)
+ , m_PadRight(0)
+ , m_PadTop(0)
+ , m_PadBottom(0)
+ , m_StrideX(0)
+ , m_StrideY(0)
+ , m_DilationX(1)
+ , m_DilationY(1)
+ , m_BiasEnabled(false)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const Convolution2dDescriptor& rhs) const
+ {
+ return m_PadLeft == rhs.m_PadLeft &&
+ m_PadRight == rhs.m_PadRight &&
+ m_PadTop == rhs.m_PadTop &&
+ m_PadBottom == rhs.m_PadBottom &&
+ m_StrideX == rhs.m_StrideX &&
+ m_StrideY == rhs.m_StrideY &&
+ m_DilationX == rhs.m_DilationX &&
+ m_DilationY == rhs.m_DilationY &&
+ m_BiasEnabled == rhs.m_BiasEnabled &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Padding left value in the width dimension.
uint32_t m_PadLeft;
/// Padding right value in the width dimension.
@@ -340,18 +404,32 @@ struct Convolution2dDescriptor
struct DepthwiseConvolution2dDescriptor
{
DepthwiseConvolution2dDescriptor()
- : m_PadLeft(0)
- , m_PadRight(0)
- , m_PadTop(0)
- , m_PadBottom(0)
- , m_StrideX(0)
- , m_StrideY(0)
- , m_DilationX(1)
- , m_DilationY(1)
- , m_BiasEnabled(false)
- , m_DataLayout(DataLayout::NCHW)
+ : m_PadLeft(0)
+ , m_PadRight(0)
+ , m_PadTop(0)
+ , m_PadBottom(0)
+ , m_StrideX(0)
+ , m_StrideY(0)
+ , m_DilationX(1)
+ , m_DilationY(1)
+ , m_BiasEnabled(false)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
+ {
+ return m_PadLeft == rhs.m_PadLeft &&
+ m_PadRight == rhs.m_PadRight &&
+ m_PadTop == rhs.m_PadTop &&
+ m_PadBottom == rhs.m_PadBottom &&
+ m_StrideX == rhs.m_StrideX &&
+ m_StrideY == rhs.m_StrideY &&
+ m_DilationX == rhs.m_DilationX &&
+ m_DilationY == rhs.m_DilationY &&
+ m_BiasEnabled == rhs.m_BiasEnabled &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Padding left value in the width dimension.
uint32_t m_PadLeft;
/// Padding right value in the width dimension.
@@ -377,19 +455,34 @@ struct DepthwiseConvolution2dDescriptor
struct DetectionPostProcessDescriptor
{
DetectionPostProcessDescriptor()
- : m_MaxDetections(0)
- , m_MaxClassesPerDetection(1)
- , m_DetectionsPerClass(1)
- , m_NmsScoreThreshold(0)
- , m_NmsIouThreshold(0)
- , m_NumClasses(0)
- , m_UseRegularNms(false)
- , m_ScaleX(0)
- , m_ScaleY(0)
- , m_ScaleW(0)
- , m_ScaleH(0)
+ : m_MaxDetections(0)
+ , m_MaxClassesPerDetection(1)
+ , m_DetectionsPerClass(1)
+ , m_NmsScoreThreshold(0)
+ , m_NmsIouThreshold(0)
+ , m_NumClasses(0)
+ , m_UseRegularNms(false)
+ , m_ScaleX(0)
+ , m_ScaleY(0)
+ , m_ScaleW(0)
+ , m_ScaleH(0)
{}
+ bool operator ==(const DetectionPostProcessDescriptor& rhs) const
+ {
+ return m_MaxDetections == rhs.m_MaxDetections &&
+ m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
+ m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
+ m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
+ m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
+ m_NumClasses == rhs.m_NumClasses &&
+ m_UseRegularNms == rhs.m_UseRegularNms &&
+ m_ScaleX == rhs.m_ScaleX &&
+ m_ScaleY == rhs.m_ScaleY &&
+ m_ScaleW == rhs.m_ScaleW &&
+ m_ScaleH == rhs.m_ScaleH;
+ }
+
/// Maximum numbers of detections.
uint32_t m_MaxDetections;
/// Maximum numbers of classes per detection, used in Fast NMS.
@@ -418,15 +511,26 @@ struct DetectionPostProcessDescriptor
struct NormalizationDescriptor
{
NormalizationDescriptor()
- : m_NormChannelType(NormalizationAlgorithmChannel::Across)
- , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
- , m_NormSize(0)
- , m_Alpha(0.f)
- , m_Beta(0.f)
- , m_K(0.f)
- , m_DataLayout(DataLayout::NCHW)
+ : m_NormChannelType(NormalizationAlgorithmChannel::Across)
+ , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
+ , m_NormSize(0)
+ , m_Alpha(0.f)
+ , m_Beta(0.f)
+ , m_K(0.f)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const NormalizationDescriptor& rhs) const
+ {
+ return m_NormChannelType == rhs.m_NormChannelType &&
+ m_NormMethodType == rhs.m_NormMethodType &&
+ m_NormSize == rhs.m_NormSize &&
+ m_Alpha == rhs.m_Alpha &&
+ m_Beta == rhs.m_Beta &&
+ m_K == rhs.m_K &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Normalization channel algorithm to use (Across, Within).
NormalizationAlgorithmChannel m_NormChannelType;
/// Normalization method algorithm to use (LocalBrightness, LocalContrast).
@@ -447,10 +551,15 @@ struct NormalizationDescriptor
struct L2NormalizationDescriptor
{
L2NormalizationDescriptor()
- : m_Eps(1e-12f)
- , m_DataLayout(DataLayout::NCHW)
+ : m_Eps(1e-12f)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const L2NormalizationDescriptor& rhs) const
+ {
+ return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Used to avoid dividing by zero.
float m_Eps;
/// The data layout to be used (NCHW, NHWC).
@@ -461,10 +570,15 @@ struct L2NormalizationDescriptor
struct BatchNormalizationDescriptor
{
BatchNormalizationDescriptor()
- : m_Eps(0.0001f)
- , m_DataLayout(DataLayout::NCHW)
+ : m_Eps(0.0001f)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const BatchNormalizationDescriptor& rhs) const
+ {
+ return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Value to add to the variance. Used to avoid dividing by zero.
float m_Eps;
/// The data layout to be used (NCHW, NHWC).
@@ -481,6 +595,14 @@ struct InstanceNormalizationDescriptor
, m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const InstanceNormalizationDescriptor& rhs) const
+ {
+ return m_Gamma == rhs.m_Gamma &&
+ m_Beta == rhs.m_Beta &&
+ m_Eps == rhs.m_Eps &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
float m_Gamma;
/// Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
@@ -507,6 +629,13 @@ struct BatchToSpaceNdDescriptor
, m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
+ {
+ return m_BlockShape == rhs.m_BlockShape &&
+ m_Crops == rhs.m_Crops &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Block shape values.
std::vector<unsigned int> m_BlockShape;
/// The values to crop from the input dimension.
@@ -518,11 +647,16 @@ struct BatchToSpaceNdDescriptor
/// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
struct FakeQuantizationDescriptor
{
- FakeQuantizationDescriptor()
- : m_Min(-6.0f)
- , m_Max(6.0f)
+ FakeQuantizationDescriptor()
+ : m_Min(-6.0f)
+ , m_Max(6.0f)
{}
+ bool operator ==(const FakeQuantizationDescriptor& rhs) const
+ {
+ return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
+ }
+
/// Minimum value.
float m_Min;
/// Maximum value.
@@ -533,9 +667,9 @@ struct FakeQuantizationDescriptor
struct ResizeBilinearDescriptor
{
ResizeBilinearDescriptor()
- : m_TargetWidth(0)
- , m_TargetHeight(0)
- , m_DataLayout(DataLayout::NCHW)
+ : m_TargetWidth(0)
+ , m_TargetHeight(0)
+ , m_DataLayout(DataLayout::NCHW)
{}
/// Target width value.
@@ -550,12 +684,20 @@ struct ResizeBilinearDescriptor
struct ResizeDescriptor
{
ResizeDescriptor()
- : m_TargetWidth(0)
- , m_TargetHeight(0)
- , m_Method(ResizeMethod::NearestNeighbor)
- , m_DataLayout(DataLayout::NCHW)
+ : m_TargetWidth(0)
+ , m_TargetHeight(0)
+ , m_Method(ResizeMethod::NearestNeighbor)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const ResizeDescriptor& rhs) const
+ {
+ return m_TargetWidth == rhs.m_TargetWidth &&
+ m_TargetHeight == rhs.m_TargetHeight &&
+ m_Method == rhs.m_Method &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Target width value.
uint32_t m_TargetWidth;
/// Target height value.
@@ -572,13 +714,18 @@ struct ResizeDescriptor
struct ReshapeDescriptor
{
ReshapeDescriptor()
- : m_TargetShape()
+ : m_TargetShape()
{}
ReshapeDescriptor(const TensorShape& shape)
- : m_TargetShape(shape)
+ : m_TargetShape(shape)
{}
+ bool operator ==(const ReshapeDescriptor& rhs) const
+ {
+ return m_TargetShape == rhs.m_TargetShape;
+ }
+
/// Target shape value.
TensorShape m_TargetShape;
};
@@ -587,18 +734,25 @@ struct ReshapeDescriptor
struct SpaceToBatchNdDescriptor
{
SpaceToBatchNdDescriptor()
- : m_BlockShape({1, 1})
- , m_PadList({{0, 0}, {0, 0}})
- , m_DataLayout(DataLayout::NCHW)
+ : m_BlockShape({1, 1})
+ , m_PadList({{0, 0}, {0, 0}})
+ , m_DataLayout(DataLayout::NCHW)
{}
SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
const std::vector<std::pair<unsigned int, unsigned int>>& padList)
- : m_BlockShape(blockShape)
- , m_PadList(padList)
- , m_DataLayout(DataLayout::NCHW)
+ : m_BlockShape(blockShape)
+ , m_PadList(padList)
+ , m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
+ {
+ return m_BlockShape == rhs.m_BlockShape &&
+ m_PadList == rhs.m_PadList &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Block shape value.
std::vector<unsigned int> m_BlockShape;
/// @brief Specifies the padding values for the input dimension:
@@ -620,6 +774,11 @@ struct SpaceToDepthDescriptor
, m_DataLayout(dataLayout)
{}
+ bool operator ==(const SpaceToDepthDescriptor& rhs) const
+ {
+ return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Scalar specifying the input block size. It must be >= 1
unsigned int m_BlockSize;
@@ -634,15 +793,25 @@ using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
struct LstmDescriptor
{
LstmDescriptor()
- : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
- , m_ClippingThresCell(0.0)
- , m_ClippingThresProj(0.0)
- , m_CifgEnabled(true)
- , m_PeepholeEnabled(false)
- , m_ProjectionEnabled(false)
- , m_LayerNormEnabled(false)
+ : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
+ , m_ClippingThresCell(0.0)
+ , m_ClippingThresProj(0.0)
+ , m_CifgEnabled(true)
+ , m_PeepholeEnabled(false)
+ , m_ProjectionEnabled(false)
+ , m_LayerNormEnabled(false)
{}
+ bool operator ==(const LstmDescriptor& rhs) const
+ {
+ return m_ActivationFunc == rhs.m_ActivationFunc &&
+ m_ClippingThresCell == rhs.m_ClippingThresCell &&
+ m_ClippingThresProj == rhs.m_ClippingThresProj &&
+ m_CifgEnabled == rhs.m_CifgEnabled &&
+ m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
+ m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
+ m_LayerNormEnabled == rhs.m_LayerNormEnabled;
+ }
+
/// @brief The activation function to use.
/// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
uint32_t m_ActivationFunc;
@@ -664,15 +833,20 @@ struct LstmDescriptor
struct MeanDescriptor
{
MeanDescriptor()
- : m_Axis()
- , m_KeepDims(false)
+ : m_Axis()
+ , m_KeepDims(false)
{}
MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
- : m_Axis(axis)
- , m_KeepDims(keepDims)
+ : m_Axis(axis)
+ , m_KeepDims(keepDims)
{}
+ bool operator ==(const MeanDescriptor& rhs) const
+ {
+ return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
+ }
+
/// Values for the dimensions to reduce.
std::vector<unsigned int> m_Axis;
/// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
@@ -686,9 +860,15 @@ struct PadDescriptor
{}
PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
- : m_PadList(padList), m_PadValue(padValue)
+ : m_PadList(padList)
+ , m_PadValue(padValue)
{}
+ bool operator ==(const PadDescriptor& rhs) const
+ {
+ return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
+ }
+
/// @brief Specifies the padding for input dimension.
/// First is the number of values to add before the tensor in the dimension.
/// Second is the number of values to add after the tensor in the dimension.
@@ -710,6 +890,11 @@ struct SliceDescriptor
SliceDescriptor() : SliceDescriptor({}, {})
{}
+ bool operator ==(const SliceDescriptor& rhs) const
+ {
+ return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
+ }
+
/// Beginning indices of the slice in each dimension.
std::vector<unsigned int> m_Begin;
@@ -721,17 +906,24 @@ struct SliceDescriptor
struct StackDescriptor
{
StackDescriptor()
- : m_Axis(0)
- , m_NumInputs(0)
- , m_InputShape()
+ : m_Axis(0)
+ , m_NumInputs(0)
+ , m_InputShape()
{}
StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
- : m_Axis(axis)
- , m_NumInputs(numInputs)
- , m_InputShape(inputShape)
+ : m_Axis(axis)
+ , m_NumInputs(numInputs)
+ , m_InputShape(inputShape)
{}
+ bool operator ==(const StackDescriptor& rhs) const
+ {
+ return m_Axis == rhs.m_Axis &&
+ m_NumInputs == rhs.m_NumInputs &&
+ m_InputShape == rhs.m_InputShape;
+ }
+
/// 0-based axis along which to stack the input tensors.
uint32_t m_Axis;
/// Number of input tensors.
@@ -746,21 +938,34 @@ struct StridedSliceDescriptor
StridedSliceDescriptor(const std::vector<int>& begin,
const std::vector<int>& end,
const std::vector<int>& stride)
- : m_Begin(begin)
- , m_End(end)
- , m_Stride(stride)
- , m_BeginMask(0)
- , m_EndMask(0)
- , m_ShrinkAxisMask(0)
- , m_EllipsisMask(0)
- , m_NewAxisMask(0)
- , m_DataLayout(DataLayout::NCHW)
+ : m_Begin(begin)
+ , m_End(end)
+ , m_Stride(stride)
+ , m_BeginMask(0)
+ , m_EndMask(0)
+ , m_ShrinkAxisMask(0)
+ , m_EllipsisMask(0)
+ , m_NewAxisMask(0)
+ , m_DataLayout(DataLayout::NCHW)
{}
StridedSliceDescriptor()
- : StridedSliceDescriptor({}, {}, {})
+ : StridedSliceDescriptor({}, {}, {})
{}
+ bool operator ==(const StridedSliceDescriptor& rhs) const
+ {
+ return m_Begin == rhs.m_Begin &&
+ m_End == rhs.m_End &&
+ m_Stride == rhs.m_Stride &&
+ m_BeginMask == rhs.m_BeginMask &&
+ m_EndMask == rhs.m_EndMask &&
+ m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
+ m_EllipsisMask == rhs.m_EllipsisMask &&
+ m_NewAxisMask == rhs.m_NewAxisMask &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
int GetStopForAxis(const TensorShape& inputShape,
unsigned int axis,
@@ -818,6 +1023,18 @@ struct TransposeConvolution2dDescriptor
m_DataLayout(DataLayout::NCHW)
{}
+ bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
+ {
+ return m_PadLeft == rhs.m_PadLeft &&
+ m_PadRight == rhs.m_PadRight &&
+ m_PadTop == rhs.m_PadTop &&
+ m_PadBottom == rhs.m_PadBottom &&
+ m_StrideX == rhs.m_StrideX &&
+ m_StrideY == rhs.m_StrideY &&
+ m_BiasEnabled == rhs.m_BiasEnabled &&
+ m_DataLayout == rhs.m_DataLayout;
+ }
+
/// Padding left value in the width dimension.
uint32_t m_PadLeft;
/// Padding right value in the width dimension.
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index a6339cf08e..381d040683 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -124,6 +124,29 @@ OriginsDescriptor& OriginsDescriptor::operator=(OriginsDescriptor rhs)
return *this;
}
+bool OriginsDescriptor::operator==(const OriginsDescriptor& rhs) const
+{
+ if (GetNumViews() != rhs.GetNumViews() ||
+ GetNumDimensions() != rhs.GetNumDimensions() ||
+ GetConcatAxis() != rhs.GetConcatAxis())
+ {
+ return false;
+ }
+
+ for (unsigned int i = 0u; i < GetNumViews(); ++i)
+ {
+ for (unsigned int j = 0u; j < GetNumDimensions(); ++j)
+ {
+ if (GetViewOrigin(i)[j] != rhs.GetViewOrigin(i)[j])
+ {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
void OriginsDescriptor::SetConcatAxis(unsigned int concatAxis)
{
m_ConcatAxis = concatAxis;
@@ -240,6 +263,27 @@ ViewsDescriptor& ViewsDescriptor::operator=(ViewsDescriptor rhs)
return *this;
}
+bool ViewsDescriptor::operator==(const ViewsDescriptor& rhs) const
+{
+ if (GetNumViews() != rhs.GetNumViews() || GetNumDimensions() != rhs.GetNumDimensions())
+ {
+ return false;
+ }
+
+ for (unsigned int i = 0u; i < GetNumViews(); ++i)
+ {
+ for (unsigned int j = 0u; j < GetNumDimensions(); ++j)
+ {
+ if (GetViewOrigin(i)[j] != rhs.GetViewOrigin(i)[j] || GetViewSizes(i)[j] != rhs.GetViewSizes(i)[j])
+ {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
uint32_t ViewsDescriptor::GetNumViews() const
{
return m_Origins.GetNumViews();
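
As a usage note for the two out-of-line comparisons above: OriginsDescriptor::operator==() and ViewsDescriptor::operator==() first check the view and dimension counts (and, for OriginsDescriptor, the concat axis), then compare the view origins (and, for ViewsDescriptor, the view sizes) element by element. A minimal sketch (editor's illustration, not part of this patch) of that behaviour from the caller's side:

// Editor's illustration only: element-wise equality of ViewsDescriptor as implemented above.
#include <armnn/Descriptors.hpp>
#include <cassert>

int main()
{
    armnn::ViewsDescriptor a(2, 2); // 2 views, 2 dimensions each
    armnn::ViewsDescriptor b(2, 2);

    for (unsigned int view = 0; view < 2; ++view)
    {
        for (unsigned int dim = 0; dim < 2; ++dim)
        {
            a.SetViewOriginCoord(view, dim, view);
            a.SetViewSize(view, dim, 1);
            b.SetViewOriginCoord(view, dim, view);
            b.SetViewSize(view, dim, 1);
        }
    }
    assert(a == b);         // identical origins and sizes

    b.SetViewSize(1, 1, 2); // change a single size entry
    assert(!(a == b));      // the descriptors no longer compare equal

    return 0;
}
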
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index e2bfb01733..0b126235e8 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -5,475 +5,261 @@
#include "TestNameAndDescriptorLayerVisitor.hpp"
#include "Network.hpp"
-namespace armnn
-{
+#include <armnn/Exceptions.hpp>
-void Set2dDataValues(SplitterDescriptor descriptor, uint32_t value)
+namespace
{
- for (unsigned int i = 0; i < descriptor.GetNumViews(); ++i)
- {
- for (unsigned int j = 0; j < descriptor.GetNumDimensions(); ++j)
- {
- descriptor.SetViewOriginCoord(i, j, value);
- descriptor.SetViewSize(i, j, value);
- }
- }
-}
-void Set2dDataValues(OriginsDescriptor& descriptor, uint32_t value)
-{
- for (unsigned int i = 0; i < descriptor.GetNumViews(); ++i)
- {
- for (unsigned int j = 0; j < descriptor.GetNumDimensions(); ++j)
- {
- descriptor.SetViewOriginCoord(i, j, value);
- }
- }
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name) \
+BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameAndDescriptor) \
+{ \
+ const char* layerName = #name "Layer"; \
+ armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
+ Test##name##LayerVisitor visitor(descriptor, layerName); \
+ armnn::Network net; \
+ armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor, layerName); \
+ layer->Accept(visitor); \
}
-BOOST_AUTO_TEST_SUITE(TestNameAndDescriptorLayerVisitor)
-
-BOOST_AUTO_TEST_CASE(CheckPermuteLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "PermuteLayer";
- PermuteDescriptor descriptor({0, 1, 2, 3});
- TestPermuteLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddPermuteLayer(descriptor, layerName);
- layer->Accept(visitor);
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name) \
+BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptrAndDescriptor) \
+{ \
+ armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
+ Test##name##LayerVisitor visitor(descriptor); \
+ armnn::Network net; \
+ armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor); \
+ layer->Accept(visitor); \
}
-BOOST_AUTO_TEST_CASE(CheckPermuteLayerVisitorNameNullAndDescriptor)
-{
- PermuteDescriptor descriptor({0, 1, 2, 3});
- TestPermuteLayerVisitor visitor(descriptor);
- Network net;
+#define TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(name) \
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name) \
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name)
- IConnectableLayer *const layer = net.AddPermuteLayer(descriptor);
- layer->Accept(visitor);
-}
+template<typename Descriptor> Descriptor GetDescriptor();
-BOOST_AUTO_TEST_CASE(CheckBatchToSpaceNdLayerVisitorNameAndDescriptor)
+template<>
+armnn::ActivationDescriptor GetDescriptor<armnn::ActivationDescriptor>()
{
- const char* layerName = "BatchToSpaceNdLayer";
- BatchToSpaceNdDescriptor descriptor({1, 1}, {{0, 0}, {0, 0}});
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- TestBatchToSpaceNdLayerVisitor visitor(descriptor, layerName);
- Network net;
+ armnn::ActivationDescriptor descriptor;
+ descriptor.m_Function = armnn::ActivationFunction::Linear;
+ descriptor.m_A = 2.0f;
+ descriptor.m_B = 2.0f;
- IConnectableLayer *const layer = net.AddBatchToSpaceNdLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckBatchToSpaceNdLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::ArgMinMaxDescriptor GetDescriptor<armnn::ArgMinMaxDescriptor>()
{
- BatchToSpaceNdDescriptor descriptor({1, 1}, {{0, 0}, {0, 0}});
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- TestBatchToSpaceNdLayerVisitor visitor(descriptor);
- Network net;
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Function = armnn::ArgMinMaxFunction::Max;
+ descriptor.m_Axis = 1;
- IConnectableLayer *const layer = net.AddBatchToSpaceNdLayer(descriptor);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckPooling2dLayerVisitorNameAndDescriptor)
+template<>
+armnn::BatchToSpaceNdDescriptor GetDescriptor<armnn::BatchToSpaceNdDescriptor>()
{
- const char* layerName = "Pooling2dLayer";
- Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = PoolingAlgorithm::Max;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_PoolWidth = 1;
- descriptor.m_PoolHeight = 1;
- descriptor.m_StrideX = 1;
- descriptor.m_StrideY = 1;
- descriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling;
- descriptor.m_PaddingMethod = PaddingMethod::IgnoreValue;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestPooling2dLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddPooling2dLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return armnn::BatchToSpaceNdDescriptor({ 1, 1 }, {{ 0, 0 }, { 0, 0 }});
}
-BOOST_AUTO_TEST_CASE(CheckPooling2dLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::ConcatDescriptor GetDescriptor<armnn::ConcatDescriptor>()
{
- Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = PoolingAlgorithm::Max;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_PoolWidth = 1;
- descriptor.m_PoolHeight = 1;
- descriptor.m_StrideX = 1;
- descriptor.m_StrideY = 1;
- descriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling;
- descriptor.m_PaddingMethod = PaddingMethod::IgnoreValue;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestPooling2dLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddPooling2dLayer(descriptor);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckActivationLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "ActivationLayer";
- ActivationDescriptor descriptor;
- descriptor.m_Function = ActivationFunction::Linear;
- descriptor.m_A = 2;
- descriptor.m_B = 2;
- TestActivationLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddActivationLayer(descriptor, layerName);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckActivationLayerVisitorNameNullAndDescriptor)
-{
- ActivationDescriptor descriptor;
- descriptor.m_Function = ActivationFunction::Linear;
- descriptor.m_A = 2;
- descriptor.m_B = 2;
- TestActivationLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddActivationLayer(descriptor);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckNormalizationLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "NormalizationLayer";
- NormalizationDescriptor descriptor;
- descriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
- descriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalContrast;
- descriptor.m_NormSize = 1;
- descriptor.m_Alpha = 1;
- descriptor.m_Beta = 1;
- descriptor.m_K = 1;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestNormalizationLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddNormalizationLayer(descriptor, layerName);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckNormalizationLayerVisitorNameNullAndDescriptor)
-{
- NormalizationDescriptor descriptor;
- descriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
- descriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalContrast;
- descriptor.m_NormSize = 1;
- descriptor.m_Alpha = 1;
- descriptor.m_Beta = 1;
- descriptor.m_K = 1;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestNormalizationLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddNormalizationLayer(descriptor);
- layer->Accept(visitor);
-}
+ armnn::ConcatDescriptor descriptor(2, 2);
+ for (unsigned int i = 0u; i < descriptor.GetNumViews(); ++i)
+ {
+ for (unsigned int j = 0u; j < descriptor.GetNumDimensions(); ++j)
+ {
+ descriptor.SetViewOriginCoord(i, j, i);
+ }
+ }
-BOOST_AUTO_TEST_CASE(CheckSoftmaxLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "SoftmaxLayer";
- SoftmaxDescriptor descriptor;
- descriptor.m_Beta = 2;
- TestSoftmaxLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddSoftmaxLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckSoftmaxLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::InstanceNormalizationDescriptor GetDescriptor<armnn::InstanceNormalizationDescriptor>()
{
- SoftmaxDescriptor descriptor;
- descriptor.m_Beta = 2;
- TestSoftmaxLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddSoftmaxLayer(descriptor);
- layer->Accept(visitor);
-}
+ armnn::InstanceNormalizationDescriptor descriptor;
+ descriptor.m_Gamma = 1.0f;
+ descriptor.m_Beta = 2.0f;
+ descriptor.m_Eps = 0.0001f;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-BOOST_AUTO_TEST_CASE(CheckSplitterLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "SplitterLayer";
- SplitterDescriptor descriptor(2, 2);
- Set2dDataValues(descriptor, 1);
- TestSplitterLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddSplitterLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckSplitterLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::L2NormalizationDescriptor GetDescriptor<armnn::L2NormalizationDescriptor>()
{
- SplitterDescriptor descriptor(2, 2);
- Set2dDataValues(descriptor, 1);
- TestSplitterLayerVisitor visitor(descriptor);
- Network net;
+ armnn::L2NormalizationDescriptor descriptor;
+ descriptor.m_Eps = 0.0001f;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- IConnectableLayer *const layer = net.AddSplitterLayer(descriptor);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameAndDescriptor)
+template<>
+armnn::MeanDescriptor GetDescriptor<armnn::MeanDescriptor>()
{
- const char* layerName = "ConcatLayer";
- OriginsDescriptor descriptor(2, 2);
- Set2dDataValues(descriptor, 1);
- descriptor.SetConcatAxis(1);
- TestConcatLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddConcatLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return armnn::MeanDescriptor({ 1, 2, }, true);
}
-BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::NormalizationDescriptor GetDescriptor<armnn::NormalizationDescriptor>()
{
- OriginsDescriptor descriptor(2, 2);
- Set2dDataValues(descriptor, 1);
- descriptor.SetConcatAxis(1);
- TestConcatLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddConcatLayer(descriptor);
- layer->Accept(visitor);
-}
+ armnn::NormalizationDescriptor descriptor;
+ descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Within;
+ descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalContrast;
+ descriptor.m_NormSize = 1u;
+ descriptor.m_Alpha = 1.0f;
+ descriptor.m_Beta = 1.0f;
+ descriptor.m_K = 1.0f;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-BOOST_AUTO_TEST_CASE(CheckResizeLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "ResizeLayer";
- ResizeDescriptor descriptor;
- descriptor.m_TargetHeight = 1;
- descriptor.m_TargetWidth = 1;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestResizeLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddResizeLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckResizeLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::PadDescriptor GetDescriptor<armnn::PadDescriptor>()
{
- ResizeDescriptor descriptor;
- descriptor.m_TargetHeight = 1;
- descriptor.m_TargetWidth = 1;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestResizeLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddResizeLayer(descriptor);
- layer->Accept(visitor);
+ return armnn::PadDescriptor({{ 1, 2 }, { 3, 4 }});
}
-BOOST_AUTO_TEST_CASE(CheckInstanceNormalizationLayerVisitorNameAndDescriptor)
+template<>
+armnn::PermuteDescriptor GetDescriptor<armnn::PermuteDescriptor>()
{
- const char* layerName = "InstanceNormalizationLayer";
- InstanceNormalizationDescriptor descriptor;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestInstanceNormalizationLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddInstanceNormalizationLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return armnn::PermuteDescriptor({ 0, 1, 2, 3 });
}
-BOOST_AUTO_TEST_CASE(CheckInstanceNormalizationLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::Pooling2dDescriptor GetDescriptor<armnn::Pooling2dDescriptor>()
{
- InstanceNormalizationDescriptor descriptor;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestInstanceNormalizationLayerVisitor visitor(descriptor);
- Network net;
+ armnn::Pooling2dDescriptor descriptor;
+ descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+ descriptor.m_PadLeft = 1u;
+ descriptor.m_PadRight = 1u;
+ descriptor.m_PadTop = 1u;
+ descriptor.m_PadBottom = 1u;
+ descriptor.m_PoolWidth = 1u;
+ descriptor.m_PoolHeight = 1u;
+ descriptor.m_StrideX = 1u;
+ descriptor.m_StrideY = 1u;
+ descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
+ descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- IConnectableLayer *const layer = net.AddInstanceNormalizationLayer(descriptor);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckL2NormalizationLayerVisitorNameAndDescriptor)
+template<>
+armnn::ReshapeDescriptor GetDescriptor<armnn::ReshapeDescriptor>()
{
- const char* layerName = "L2NormalizationLayer";
- L2NormalizationDescriptor descriptor;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestL2NormalizationLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddL2NormalizationLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return armnn::ReshapeDescriptor({ 1, 2, 3, 4 });
}
-BOOST_AUTO_TEST_CASE(CheckL2NormalizationLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::ResizeDescriptor GetDescriptor<armnn::ResizeDescriptor>()
{
- L2NormalizationDescriptor descriptor;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestL2NormalizationLayerVisitor visitor(descriptor);
- Network net;
+ armnn::ResizeDescriptor descriptor;
+ descriptor.m_TargetHeight = 1u;
+ descriptor.m_TargetWidth = 1u;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- IConnectableLayer *const layer = net.AddL2NormalizationLayer(descriptor);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckLogSoftmaxLayerVisitorNameAndDescriptor)
+template<>
+armnn::SliceDescriptor GetDescriptor<armnn::SliceDescriptor>()
{
- const char* layerName = "LogSoftmaxLayer";
-
- LogSoftmaxDescriptor descriptor;
- descriptor.m_Beta = 2.0f;
- descriptor.m_Axis = 1;
-
- TestLogSoftmaxLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddLogSoftmaxLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return armnn::SliceDescriptor({ 1, 1 }, { 2, 2 });
}
-BOOST_AUTO_TEST_CASE(CheckLogSoftmaxLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::SoftmaxDescriptor GetDescriptor<armnn::SoftmaxDescriptor>()
{
- LogSoftmaxDescriptor descriptor;
+ armnn::SoftmaxDescriptor descriptor;
descriptor.m_Beta = 2.0f;
- descriptor.m_Axis = 1;
-
- TestLogSoftmaxLayerVisitor visitor(descriptor);
- Network net;
+ descriptor.m_Axis = -1;
- IConnectableLayer *const layer = net.AddLogSoftmaxLayer(descriptor);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckReshapeLayerVisitorNameAndDescriptor)
+template<>
+armnn::SpaceToBatchNdDescriptor GetDescriptor<armnn::SpaceToBatchNdDescriptor>()
{
- const char* layerName = "ReshapeLayer";
- ReshapeDescriptor descriptor({1, 2, 3, 4});
- TestReshapeLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddReshapeLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return armnn::SpaceToBatchNdDescriptor({ 2, 2 }, {{ 1, 1 } , { 1, 1 }});
}
-BOOST_AUTO_TEST_CASE(CheckReshapeLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::SpaceToDepthDescriptor GetDescriptor<armnn::SpaceToDepthDescriptor>()
{
- ReshapeDescriptor descriptor({1, 2, 3, 4});
- TestReshapeLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddReshapeLayer(descriptor);
- layer->Accept(visitor);
+ return armnn::SpaceToDepthDescriptor(2, armnn::DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CheckSpaceToBatchNdLayerVisitorNameAndDescriptor)
+template<>
+armnn::SplitterDescriptor GetDescriptor<armnn::SplitterDescriptor>()
{
- const char* layerName = "SpaceToBatchNdLayer";
- SpaceToBatchNdDescriptor descriptor({2, 2}, {{1, 1}, {1, 1}});
- TestSpaceToBatchNdLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddSpaceToBatchNdLayer(descriptor, layerName);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckSpaceToBatchNdLayerVisitorNameNullAndDescriptor)
-{
- SpaceToBatchNdDescriptor descriptor({2, 2}, {{1, 1}, {1, 1}});
- TestSpaceToBatchNdLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddSpaceToBatchNdLayer(descriptor);
- layer->Accept(visitor);
-}
-
-
-BOOST_AUTO_TEST_CASE(CheckMeanLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "MeanLayer";
- MeanDescriptor descriptor({1, 2}, false);
- TestMeanLayerVisitor visitor(descriptor, layerName);
- Network net;
-
- IConnectableLayer *const layer = net.AddMeanLayer(descriptor, layerName);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckMeanLayerVisitorNameNullAndDescriptor)
-{
- MeanDescriptor descriptor({1, 2}, false);
- TestMeanLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddMeanLayer(descriptor);
- layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckPadLayerVisitorNameAndDescriptor)
-{
- const char* layerName = "PadLayer";
- PadDescriptor descriptor({{1, 2}, {3, 4}});
- TestPadLayerVisitor visitor(descriptor, layerName);
- Network net;
+ armnn::SplitterDescriptor descriptor(2, 2);
+ for (unsigned int i = 0u; i < descriptor.GetNumViews(); ++i)
+ {
+ for (unsigned int j = 0u; j < descriptor.GetNumDimensions(); ++j)
+ {
+ descriptor.SetViewOriginCoord(i, j, i);
+ descriptor.SetViewSize(i, j, 1);
+ }
+ }
- IConnectableLayer *const layer = net.AddPadLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckPadLayerVisitorNameNullAndDescriptor)
+template<>
+armnn::StackDescriptor GetDescriptor<armnn::StackDescriptor>()
{
- PadDescriptor descriptor({{1, 2}, {3, 4}});
- TestPadLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddPadLayer(descriptor);
- layer->Accept(visitor);
+ return armnn::StackDescriptor(1, 2, { 2, 2 });
}
-BOOST_AUTO_TEST_CASE(CheckStridedSliceLayerVisitorNameAndDescriptor)
+template<>
+armnn::StridedSliceDescriptor GetDescriptor<armnn::StridedSliceDescriptor>()
{
- const char* layerName = "StridedSliceLayer";
- StridedSliceDescriptor descriptor({1, 2}, {3, 4}, {3, 4});
+ armnn::StridedSliceDescriptor descriptor({ 1, 2 }, { 3, 4 }, { 3, 4 });
descriptor.m_BeginMask = 1;
descriptor.m_EndMask = 1;
descriptor.m_ShrinkAxisMask = 1;
descriptor.m_EllipsisMask = 1;
descriptor.m_NewAxisMask = 1;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestStridedSliceLayerVisitor visitor(descriptor, layerName);
- Network net;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- IConnectableLayer *const layer = net.AddStridedSliceLayer(descriptor, layerName);
- layer->Accept(visitor);
+ return descriptor;
}
-BOOST_AUTO_TEST_CASE(CheckStridedSliceLayerVisitorNameNullAndDescriptor)
-{
- StridedSliceDescriptor descriptor({1, 2}, {3, 4}, {3, 4});
- descriptor.m_BeginMask = 1;
- descriptor.m_EndMask = 1;
- descriptor.m_ShrinkAxisMask = 1;
- descriptor.m_EllipsisMask = 1;
- descriptor.m_NewAxisMask = 1;
- descriptor.m_DataLayout = DataLayout::NHWC;
- TestStridedSliceLayerVisitor visitor(descriptor);
- Network net;
-
- IConnectableLayer *const layer = net.AddStridedSliceLayer(descriptor);
- layer->Accept(visitor);
-}
+} // anonymous namespace
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE(TestNameAndDescriptorLayerVisitor)
-} //namespace armnn
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Activation)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ArgMinMax)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(DepthToSpace)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(BatchToSpaceNd)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Concat)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Mean)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Normalization)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Pad)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Permute)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Pooling2d)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Reshape)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Resize)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Slice)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Softmax)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(SpaceToBatchNd)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(SpaceToDepth)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Splitter)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Stack)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(StridedSlice)
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
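
For readability, here is an approximate expansion (editor's illustration, not part of this patch) of one invocation of the macros defined at the top of this file, TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Activation); the Test<Name>LayerVisitor types are supplied by the accompanying header below:

// Editor's illustration only: roughly what the preprocessor generates for the Activation entry.
BOOST_AUTO_TEST_CASE(CheckActivationLayerVisitorNameAndDescriptor)
{
    const char* layerName = "ActivationLayer";
    armnn::ActivationDescriptor descriptor = GetDescriptor<armnn::ActivationDescriptor>();
    TestActivationLayerVisitor visitor(descriptor, layerName);
    armnn::Network net;
    armnn::IConnectableLayer *const layer = net.AddActivationLayer(descriptor, layerName);
    layer->Accept(visitor);
}

BOOST_AUTO_TEST_CASE(CheckActivationLayerVisitorNameNullptrAndDescriptor)
{
    armnn::ActivationDescriptor descriptor = GetDescriptor<armnn::ActivationDescriptor>();
    TestActivationLayerVisitor visitor(descriptor);
    armnn::Network net;
    armnn::IConnectableLayer *const layer = net.AddActivationLayer(descriptor);
    layer->Accept(visitor);
}
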
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index e46aa34e29..aefcba5f59 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -4,734 +4,64 @@
//
#pragma once
-#include <armnn/ArmNN.hpp>
#include "TestLayerVisitor.hpp"
-#include <boost/test/unit_test.hpp>
-
-namespace armnn
-{
-
-// Concrete TestLayerVisitor subclasses for layers taking Descriptor argument with overridden VisitLayer methods
-class TestPermuteLayerVisitor : public TestLayerVisitor
-{
-private:
- const PermuteDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestPermuteLayerVisitor(const PermuteDescriptor& permuteDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(permuteDescriptor.m_DimMappings)
- {};
-
- void CheckDescriptor(const PermuteDescriptor& permuteDescriptor)
- {
- if (permuteDescriptor.m_DimMappings.GetSize() == m_VisitorDescriptor.m_DimMappings.GetSize())
- {
- for (unsigned int i = 0; i < permuteDescriptor.m_DimMappings.GetSize(); ++i)
- {
- BOOST_CHECK_EQUAL(permuteDescriptor.m_DimMappings[i], m_VisitorDescriptor.m_DimMappings[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for batchToSpaceNdDescriptor m_DimMappings.");
- }
- };
-
- void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(permuteDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestBatchToSpaceNdLayerVisitor : public TestLayerVisitor
-{
-private:
- BatchToSpaceNdDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestBatchToSpaceNdLayerVisitor(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(batchToSpaceNdDescriptor.m_BlockShape, batchToSpaceNdDescriptor.m_Crops)
- {
- m_VisitorDescriptor.m_DataLayout = batchToSpaceNdDescriptor.m_DataLayout;
- };
-
- void CheckDescriptor(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor)
- {
- if (batchToSpaceNdDescriptor.m_BlockShape.size() == m_VisitorDescriptor.m_BlockShape.size())
- {
- for (unsigned int i = 0; i < batchToSpaceNdDescriptor.m_BlockShape.size(); ++i)
- {
- BOOST_CHECK_EQUAL(batchToSpaceNdDescriptor.m_BlockShape[i], m_VisitorDescriptor.m_BlockShape[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for batchToSpaceNdDescriptor m_BlockShape.");
- }
-
- if (batchToSpaceNdDescriptor.m_Crops.size() == m_VisitorDescriptor.m_Crops.size())
- {
- for (unsigned int i = 0; i < batchToSpaceNdDescriptor.m_Crops.size(); ++i)
- {
- BOOST_CHECK_EQUAL(batchToSpaceNdDescriptor.m_Crops[i].first, m_VisitorDescriptor.m_Crops[i].first);
- BOOST_CHECK_EQUAL(batchToSpaceNdDescriptor.m_Crops[i].second, m_VisitorDescriptor.m_Crops[i].second);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for batchToSpaceNdDescriptor m_Crops.");
- }
-
- BOOST_CHECK(batchToSpaceNdDescriptor.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
-
- void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
- const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(batchToSpaceNdDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestPooling2dLayerVisitor : public TestLayerVisitor
-{
-private:
- Pooling2dDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestPooling2dLayerVisitor(const Pooling2dDescriptor& pooling2dDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_PoolType = pooling2dDescriptor.m_PoolType;
- m_VisitorDescriptor.m_PadLeft = pooling2dDescriptor.m_PadLeft;
- m_VisitorDescriptor.m_PadRight = pooling2dDescriptor.m_PadRight;
- m_VisitorDescriptor.m_PadTop = pooling2dDescriptor.m_PadTop;
- m_VisitorDescriptor.m_PadBottom = pooling2dDescriptor.m_PadBottom;
- m_VisitorDescriptor.m_PoolWidth = pooling2dDescriptor.m_PoolWidth;
- m_VisitorDescriptor.m_PoolHeight = pooling2dDescriptor.m_PoolHeight;
- m_VisitorDescriptor.m_StrideX = pooling2dDescriptor.m_StrideX;
- m_VisitorDescriptor.m_StrideY = pooling2dDescriptor.m_StrideY;
- m_VisitorDescriptor.m_OutputShapeRounding = pooling2dDescriptor.m_OutputShapeRounding;
- m_VisitorDescriptor.m_PaddingMethod = pooling2dDescriptor.m_PaddingMethod;
- m_VisitorDescriptor.m_DataLayout = pooling2dDescriptor.m_DataLayout;
- };
-
- void CheckDescriptor(const Pooling2dDescriptor& pooling2dDescriptor)
- {
- BOOST_CHECK(pooling2dDescriptor.m_PoolType == m_VisitorDescriptor.m_PoolType);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_PadLeft, m_VisitorDescriptor.m_PadLeft);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_PadRight, m_VisitorDescriptor.m_PadRight);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_PadTop, m_VisitorDescriptor.m_PadTop);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_PadBottom, m_VisitorDescriptor.m_PadBottom);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_PoolWidth, m_VisitorDescriptor.m_PoolWidth);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_PoolHeight, m_VisitorDescriptor.m_PoolHeight);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_StrideX, m_VisitorDescriptor.m_StrideX);
- BOOST_CHECK_EQUAL(pooling2dDescriptor.m_StrideY, m_VisitorDescriptor.m_StrideY);
- BOOST_CHECK(pooling2dDescriptor.m_OutputShapeRounding == m_VisitorDescriptor.m_OutputShapeRounding);
- BOOST_CHECK(pooling2dDescriptor.m_PaddingMethod == m_VisitorDescriptor.m_PaddingMethod);
- BOOST_CHECK(pooling2dDescriptor.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
-
- void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(pooling2dDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestActivationLayerVisitor : public TestLayerVisitor
-{
-private:
- ActivationDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestActivationLayerVisitor(const ActivationDescriptor& activationDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_Function = activationDescriptor.m_Function;
- m_VisitorDescriptor.m_A = activationDescriptor.m_A;
- m_VisitorDescriptor.m_B = activationDescriptor.m_B;
- };
-
- void CheckDescriptor(const ActivationDescriptor& activationDescriptor)
- {
- BOOST_CHECK(activationDescriptor.m_Function == m_VisitorDescriptor.m_Function);
- BOOST_CHECK_EQUAL(activationDescriptor.m_A, m_VisitorDescriptor.m_A);
- BOOST_CHECK_EQUAL(activationDescriptor.m_B, m_VisitorDescriptor.m_B);
- };
-
- void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(activationDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestNormalizationLayerVisitor : public TestLayerVisitor
-{
-private:
- NormalizationDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestNormalizationLayerVisitor(const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_NormChannelType = normalizationDescriptor.m_NormChannelType;
- m_VisitorDescriptor.m_NormMethodType = normalizationDescriptor.m_NormMethodType;
- m_VisitorDescriptor.m_NormSize = normalizationDescriptor.m_NormSize;
- m_VisitorDescriptor.m_Alpha = normalizationDescriptor.m_Alpha;
- m_VisitorDescriptor.m_Beta = normalizationDescriptor.m_Beta;
- m_VisitorDescriptor.m_K = normalizationDescriptor.m_K;
- m_VisitorDescriptor.m_DataLayout = normalizationDescriptor.m_DataLayout;
- };
-
- void CheckDescriptor(const NormalizationDescriptor& normalizationDescriptor)
- {
- BOOST_CHECK(normalizationDescriptor.m_NormChannelType == m_VisitorDescriptor.m_NormChannelType);
- BOOST_CHECK(normalizationDescriptor.m_NormMethodType == m_VisitorDescriptor.m_NormMethodType);
- BOOST_CHECK_EQUAL(normalizationDescriptor.m_NormSize, m_VisitorDescriptor.m_NormSize);
- BOOST_CHECK_EQUAL(normalizationDescriptor.m_Alpha, m_VisitorDescriptor.m_Alpha);
- BOOST_CHECK_EQUAL(normalizationDescriptor.m_Beta, m_VisitorDescriptor.m_Beta);
- BOOST_CHECK_EQUAL(normalizationDescriptor.m_K, m_VisitorDescriptor.m_K);
- BOOST_CHECK(normalizationDescriptor.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
-
- void VisitNormalizationLayer(const IConnectableLayer* layer,
- const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(normalizationDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestSoftmaxLayerVisitor : public TestLayerVisitor
-{
-private:
- SoftmaxDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestSoftmaxLayerVisitor(const SoftmaxDescriptor& softmaxDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_Beta = softmaxDescriptor.m_Beta;
- };
-
- void CheckDescriptor(const SoftmaxDescriptor& softmaxDescriptor)
- {
- BOOST_CHECK_EQUAL(softmaxDescriptor.m_Beta, m_VisitorDescriptor.m_Beta);
- }
-
- void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(softmaxDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestSplitterLayerVisitor : public TestLayerVisitor
-{
-private:
- ViewsDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestSplitterLayerVisitor(const ViewsDescriptor& splitterDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(splitterDescriptor.GetNumViews(), splitterDescriptor.GetNumDimensions())
- {
- if (splitterDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
- {
- BOOST_ERROR("Unequal number of views in splitter descriptor.");
- }
- else if (splitterDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
- {
- BOOST_ERROR("Unequal number of dimensions in splitter descriptor.");
- }
- else
- {
- for (unsigned int i = 0; i < splitterDescriptor.GetNumViews(); ++i)
- {
- for (unsigned int j = 0; j < splitterDescriptor.GetNumDimensions(); ++j)
- {
- m_VisitorDescriptor.SetViewOriginCoord(i, j, splitterDescriptor.GetViewOrigin(i)[j]);
- m_VisitorDescriptor.SetViewSize(i, j, splitterDescriptor.GetViewSizes(i)[j]);
- }
- }
- }
- };
-
- void CheckDescriptor(const ViewsDescriptor& splitterDescriptor)
- {
-
- BOOST_CHECK_EQUAL(splitterDescriptor.GetNumViews(), m_VisitorDescriptor.GetNumViews());
- BOOST_CHECK_EQUAL(splitterDescriptor.GetNumDimensions(), m_VisitorDescriptor.GetNumDimensions());
-
- if (splitterDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
- {
- BOOST_ERROR("Unequal number of views in splitter descriptor.");
- }
- else if (splitterDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
- {
- BOOST_ERROR("Unequal number of dimensions in splitter descriptor.");
- }
- else
- {
- for (unsigned int i = 0; i < splitterDescriptor.GetNumViews(); ++i)
- {
- for (unsigned int j = 0; j < splitterDescriptor.GetNumDimensions(); ++j)
- {
- BOOST_CHECK_EQUAL(splitterDescriptor.GetViewOrigin(i)[j], m_VisitorDescriptor.GetViewOrigin(i)[j]);
- BOOST_CHECK_EQUAL(splitterDescriptor.GetViewSizes(i)[j], m_VisitorDescriptor.GetViewSizes(i)[j]);
- }
- }
- }
- };
-
- void VisitSplitterLayer(const IConnectableLayer* layer,
- const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(splitterDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestConcatLayerVisitor : public TestLayerVisitor
-{
-private:
- OriginsDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestConcatLayerVisitor(const OriginsDescriptor& concatDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(concatDescriptor.GetNumViews(), concatDescriptor.GetNumDimensions())
- {
- m_VisitorDescriptor.SetConcatAxis(concatDescriptor.GetConcatAxis());
-
- if (concatDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
- {
- BOOST_ERROR("Unequal number of views in splitter descriptor.");
- }
- else if (concatDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
- {
- BOOST_ERROR("Unequal number of dimensions in splitter descriptor.");
- }
- else
- {
- for (unsigned int i = 0; i < concatDescriptor.GetNumViews(); ++i)
- {
- for (unsigned int j = 0; j < concatDescriptor.GetNumDimensions(); ++j)
- {
- m_VisitorDescriptor.SetViewOriginCoord(i, j, concatDescriptor.GetViewOrigin(i)[j]);
- }
- }
- }
- };
-
- void CheckDescriptor(const OriginsDescriptor& concatDescriptor)
- {
- BOOST_CHECK_EQUAL(concatDescriptor.GetNumViews(), m_VisitorDescriptor.GetNumViews());
- BOOST_CHECK_EQUAL(concatDescriptor.GetNumDimensions(), m_VisitorDescriptor.GetNumDimensions());
- BOOST_CHECK_EQUAL(concatDescriptor.GetConcatAxis(), m_VisitorDescriptor.GetConcatAxis());
-
- if (concatDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
- {
- BOOST_ERROR("Unequal number of views in splitter descriptor.");
- }
- else if (concatDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
- {
- BOOST_ERROR("Unequal number of dimensions in splitter descriptor.");
- }
- else
- {
- for (unsigned int i = 0; i < concatDescriptor.GetNumViews(); ++i)
- {
- for (unsigned int j = 0; j < concatDescriptor.GetNumDimensions(); ++j)
- {
- BOOST_CHECK_EQUAL(concatDescriptor.GetViewOrigin(i)[j], m_VisitorDescriptor.GetViewOrigin(i)[j]);
- }
- }
- }
- }
-
- void VisitConcatLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& concatDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(concatDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestResizeLayerVisitor : public TestLayerVisitor
-{
-private:
- ResizeDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestResizeLayerVisitor(const ResizeDescriptor& descriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_Method = descriptor.m_Method;
- m_VisitorDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
- m_VisitorDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
- m_VisitorDescriptor.m_DataLayout = descriptor.m_DataLayout;
- };
-
- void CheckDescriptor(const ResizeDescriptor& descriptor)
- {
- BOOST_CHECK(descriptor.m_Method == m_VisitorDescriptor.m_Method);
- BOOST_CHECK(descriptor.m_TargetWidth == m_VisitorDescriptor.m_TargetWidth);
- BOOST_CHECK(descriptor.m_TargetHeight == m_VisitorDescriptor.m_TargetHeight);
- BOOST_CHECK(descriptor.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
-
- void VisitResizeLayer(const IConnectableLayer* layer,
- const ResizeDescriptor& descriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(descriptor);
- CheckLayerName(name);
- };
-};
-
-class TestInstanceNormalizationLayerVisitor : public TestLayerVisitor
-{
-private:
- InstanceNormalizationDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestInstanceNormalizationLayerVisitor(const InstanceNormalizationDescriptor& desc,
- const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_Beta = desc.m_Beta;
- m_VisitorDescriptor.m_Gamma = desc.m_Gamma;
- m_VisitorDescriptor.m_Eps = desc.m_Eps;
- m_VisitorDescriptor.m_DataLayout = desc.m_DataLayout;
- };
-
- void CheckDescriptor(const InstanceNormalizationDescriptor& desc)
- {
- BOOST_CHECK(desc.m_Beta == m_VisitorDescriptor.m_Beta);
- BOOST_CHECK(desc.m_Gamma == m_VisitorDescriptor.m_Gamma);
- BOOST_CHECK(desc.m_Eps == m_VisitorDescriptor.m_Eps);
- BOOST_CHECK(desc.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
-
- void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
- const InstanceNormalizationDescriptor& desc,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(desc);
- CheckLayerName(name);
- };
-};
-
-class TestL2NormalizationLayerVisitor : public TestLayerVisitor
-{
-private:
- L2NormalizationDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestL2NormalizationLayerVisitor(const L2NormalizationDescriptor& desc, const char* name = nullptr)
- : TestLayerVisitor(name)
- {
- m_VisitorDescriptor.m_DataLayout = desc.m_DataLayout;
- };
-
- void CheckDescriptor(const L2NormalizationDescriptor& desc)
- {
- BOOST_CHECK(desc.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
- void VisitL2NormalizationLayer(const IConnectableLayer* layer,
- const L2NormalizationDescriptor& desc,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(desc);
- CheckLayerName(name);
- };
-};
-
-class TestLogSoftmaxLayerVisitor : public TestLayerVisitor
-{
-private:
- LogSoftmaxDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestLogSoftmaxLayerVisitor(const LogSoftmaxDescriptor& descriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(descriptor) {}
-
- void CheckDescriptor(const LogSoftmaxDescriptor& descriptor)
- {
- BOOST_CHECK_EQUAL(descriptor.m_Beta, m_VisitorDescriptor.m_Beta);
- BOOST_CHECK_EQUAL(descriptor.m_Axis, m_VisitorDescriptor.m_Axis);
- }
-
- void VisitLogSoftmaxLayer(const IConnectableLayer* layer,
- const LogSoftmaxDescriptor& descriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(descriptor);
- CheckLayerName(name);
- };
-};
-
-class TestReshapeLayerVisitor : public TestLayerVisitor
-{
-private:
- const ReshapeDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestReshapeLayerVisitor(const ReshapeDescriptor& reshapeDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(reshapeDescriptor.m_TargetShape)
- {};
-
- void CheckDescriptor(const ReshapeDescriptor& reshapeDescriptor)
- {
- BOOST_CHECK_MESSAGE(reshapeDescriptor.m_TargetShape == m_VisitorDescriptor.m_TargetShape,
- reshapeDescriptor.m_TargetShape << " compared to " << m_VisitorDescriptor.m_TargetShape);
- }
-
- void VisitReshapeLayer(const IConnectableLayer* layer,
- const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(reshapeDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestSpaceToBatchNdLayerVisitor : public TestLayerVisitor
-{
-private:
- SpaceToBatchNdDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestSpaceToBatchNdLayerVisitor(const SpaceToBatchNdDescriptor& desc, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(desc.m_BlockShape, desc.m_PadList)
- {
- m_VisitorDescriptor.m_DataLayout = desc.m_DataLayout;
- };
-
- void CheckDescriptor(const SpaceToBatchNdDescriptor& desc)
- {
- if (desc.m_BlockShape.size() == m_VisitorDescriptor.m_BlockShape.size())
- {
- for (unsigned int i = 0; i < desc.m_BlockShape.size(); ++i)
- {
- BOOST_CHECK_EQUAL(desc.m_BlockShape[i], m_VisitorDescriptor.m_BlockShape[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for SpaceToBatchNdDescriptor m_BlockShape.");
- }
-
- if (desc.m_PadList.size() == m_VisitorDescriptor.m_PadList.size())
- {
- for (unsigned int i = 0; i < desc.m_PadList.size(); ++i)
- {
- BOOST_CHECK_EQUAL(desc.m_PadList[i].first, m_VisitorDescriptor.m_PadList[i].first);
- BOOST_CHECK_EQUAL(desc.m_PadList[i].second, m_VisitorDescriptor.m_PadList[i].second);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for SpaceToBatchNdDescriptor m_PadList.");
- }
-
- BOOST_CHECK(desc.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
-
- void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& desc,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(desc);
- CheckLayerName(name);
- };
-};
-
-class TestMeanLayerVisitor : public TestLayerVisitor
-{
-private:
- const MeanDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestMeanLayerVisitor(const MeanDescriptor& meanDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(meanDescriptor.m_Axis, meanDescriptor.m_KeepDims)
- {};
-
- void CheckDescriptor(const MeanDescriptor& meanDescriptor)
- {
- if (meanDescriptor.m_Axis.size() == m_VisitorDescriptor.m_Axis.size())
- {
- for (unsigned int i = 0; i < meanDescriptor.m_Axis.size(); ++i)
- {
- BOOST_CHECK_EQUAL(meanDescriptor.m_Axis[i], m_VisitorDescriptor.m_Axis[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for MeanDescriptor m_Axis.");
- }
-
- BOOST_CHECK_EQUAL(meanDescriptor.m_KeepDims, m_VisitorDescriptor.m_KeepDims);
- }
-
- void VisitMeanLayer(const IConnectableLayer* layer,
- const MeanDescriptor& meanDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(meanDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestPadLayerVisitor : public TestLayerVisitor
-{
-private:
- const PadDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestPadLayerVisitor(const PadDescriptor& padDescriptor, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(padDescriptor.m_PadList)
- {};
-
- void CheckDescriptor(const PadDescriptor& padDescriptor)
- {
- if (padDescriptor.m_PadList.size() == m_VisitorDescriptor.m_PadList.size())
- {
- for (unsigned int i = 0; i < padDescriptor.m_PadList.size(); ++i)
- {
- BOOST_CHECK_EQUAL(padDescriptor.m_PadList[i].first, m_VisitorDescriptor.m_PadList[i].first);
- BOOST_CHECK_EQUAL(padDescriptor.m_PadList[i].second, m_VisitorDescriptor.m_PadList[i].second);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for SpaceToBatchNdDescriptor m_PadList.");
- }
- }
-
- void VisitPadLayer(const IConnectableLayer* layer,
- const PadDescriptor& padDescriptor,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(padDescriptor);
- CheckLayerName(name);
- };
-};
-
-class TestStridedSliceLayerVisitor : public TestLayerVisitor
-{
-private:
- StridedSliceDescriptor m_VisitorDescriptor;
-
-public:
- explicit TestStridedSliceLayerVisitor(const StridedSliceDescriptor& desc, const char* name = nullptr)
- : TestLayerVisitor(name)
- , m_VisitorDescriptor(desc.m_Begin, desc.m_End, desc.m_Stride)
- {
- m_VisitorDescriptor.m_BeginMask = desc.m_BeginMask;
- m_VisitorDescriptor.m_EndMask = desc.m_EndMask;
- m_VisitorDescriptor.m_ShrinkAxisMask = desc.m_ShrinkAxisMask;
- m_VisitorDescriptor.m_EllipsisMask = desc.m_EllipsisMask;
- m_VisitorDescriptor.m_NewAxisMask = desc.m_NewAxisMask;
- m_VisitorDescriptor.m_DataLayout = desc.m_DataLayout;
- };
-
- void CheckDescriptor(const StridedSliceDescriptor& desc)
- {
- if (desc.m_Begin.size() == m_VisitorDescriptor.m_Begin.size())
- {
- for (unsigned int i = 0; i < desc.m_Begin.size(); ++i)
- {
- BOOST_CHECK_EQUAL(desc.m_Begin[i], m_VisitorDescriptor.m_Begin[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for StridedSliceDescriptor m_Begin.");
- }
-
- if (desc.m_End.size() == m_VisitorDescriptor.m_End.size())
- {
- for (unsigned int i = 0; i < desc.m_End.size(); ++i)
- {
- BOOST_CHECK_EQUAL(desc.m_End[i], m_VisitorDescriptor.m_End[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for StridedSliceDescriptor m_End.");
- }
-
- if (desc.m_Stride.size() == m_VisitorDescriptor.m_Stride.size())
- {
- for (unsigned int i = 0; i < desc.m_Stride.size(); ++i)
- {
- BOOST_CHECK_EQUAL(desc.m_Stride[i], m_VisitorDescriptor.m_Stride[i]);
- }
- }
- else
- {
- BOOST_ERROR("Unequal vector size for StridedSliceDescriptor m_Stride.");
- }
-
- BOOST_CHECK_EQUAL(desc.m_BeginMask, m_VisitorDescriptor.m_BeginMask);
- BOOST_CHECK_EQUAL(desc.m_EndMask, m_VisitorDescriptor.m_EndMask);
- BOOST_CHECK_EQUAL(desc.m_ShrinkAxisMask, m_VisitorDescriptor.m_ShrinkAxisMask);
- BOOST_CHECK_EQUAL(desc.m_EllipsisMask, m_VisitorDescriptor.m_EllipsisMask);
- BOOST_CHECK_EQUAL(desc.m_NewAxisMask, m_VisitorDescriptor.m_NewAxisMask);
- BOOST_CHECK(desc.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
- }
+#include <armnn/ArmNN.hpp>
- void VisitStridedSliceLayer(const IConnectableLayer* layer,
- const StridedSliceDescriptor& desc,
- const char* name = nullptr) override
- {
- CheckLayerPointer(layer);
- CheckDescriptor(desc);
- CheckLayerName(name);
- };
-};
+#include <boost/test/unit_test.hpp>
-} //namespace armnn
+namespace
+{
+
+#define DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(name) \
+class Test##name##LayerVisitor : public armnn::TestLayerVisitor \
+{ \
+private: \
+ using Descriptor = armnn::name##Descriptor; \
+ Descriptor m_Descriptor; \
+ \
+    void CheckDescriptor(const Descriptor& descriptor) \
+    { \
+        BOOST_CHECK(descriptor == m_Descriptor); \
+ } \
+\
+public: \
+ explicit Test##name##LayerVisitor(const Descriptor& descriptor, \
+ const char* layerName = nullptr) \
+ : armnn::TestLayerVisitor(layerName) \
+ , m_Descriptor(descriptor) {}; \
+ \
+ void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
+ const Descriptor& descriptor, \
+ const char* layerName = nullptr) override \
+ { \
+ CheckLayerPointer(layer); \
+ CheckDescriptor(descriptor); \
+ CheckLayerName(layerName); \
+ } \
+};
+
+} // anonymous namespace
+
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Activation)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ArgMinMax)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(BatchToSpaceNd)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(InstanceNormalization)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(L2Normalization)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(LogSoftmax)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Mean)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Normalization)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Pad)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Permute)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Pooling2d)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Reshape)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Resize)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Slice)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Softmax)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(SpaceToBatchNd)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(SpaceToDepth)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Splitter)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Stack)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(StridedSlice)
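
For reference, a minimal sketch of how one of the macro-generated visitors above might be exercised from the matching .cpp test. The Activation layer is used purely as an example; armnn::Network::AddActivationLayer and IConnectableLayer::Accept are assumed from the public ArmNN API of this release, not shown in this patch.

// Sketch only: drives the generated TestActivationLayerVisitor end to end.
BOOST_AUTO_TEST_CASE(CheckActivationLayerVisitorNameAndDescriptor)
{
    const char* layerName = "ActivationLayer";

    // Descriptor the layer will be created with.
    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::ReLu;
    descriptor.m_A = 2.0f;
    descriptor.m_B = 0.5f;

    // The visitor keeps a copy and compares it via the new Descriptor operator==.
    TestActivationLayerVisitor visitor(descriptor, layerName);

    armnn::Network net;
    armnn::IConnectableLayer* const layer = net.AddActivationLayer(descriptor, layerName);
    layer->Accept(visitor); // invokes VisitActivationLayer: pointer, descriptor and name checks
}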
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 2217a32edf..6bc2dc7c65 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -12,54 +12,48 @@
namespace
{
-#define ADD_LAYER_METHOD_1_PARAM(name) net.Add##name##Layer("name##Layer")
-#define ADD_LAYER_METHOD_2_PARAM(name) net.Add##name##Layer(armnn::name##Descriptor(), "name##Layer")
-
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME(name, numParams) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME(name) \
BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
{ \
Test##name##LayerVisitor visitor("name##Layer"); \
armnn::Network net; \
- armnn::IConnectableLayer *const layer = ADD_LAYER_METHOD_##numParams##_PARAM(name); \
+ armnn::IConnectableLayer *const layer = net.Add##name##Layer("name##Layer"); \
layer->Accept(visitor); \
}
-#define ADD_LAYER_METHOD_NULLPTR_1_PARAM(name) net.Add##name##Layer()
-#define ADD_LAYER_METHOD_NULLPTR_2_PARAM(name) net.Add##name##Layer(armnn::name##Descriptor())
-
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, numParams) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name) \
BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptr) \
{ \
Test##name##LayerVisitor visitor; \
armnn::Network net; \
- armnn::IConnectableLayer *const layer = ADD_LAYER_METHOD_NULLPTR_##numParams##_PARAM(name); \
+ armnn::IConnectableLayer *const layer = net.Add##name##Layer(); \
layer->Accept(visitor); \
}
-#define TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME(name, 1) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, 1)
-
-#define TEST_SUITE_NAME_ONLY_LAYER_VISITOR_2_PARAM(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME(name, 2) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, 2)
+#define TEST_SUITE_NAME_ONLY_LAYER_VISITOR(name) \
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(name) \
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name)
} // anonymous namespace
BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Addition)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_2_PARAM(DepthToSpace)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Division)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Equal)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Floor)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Gather)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Greater)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Maximum)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Minimum)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Multiplication)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Rsqrt)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_2_PARAM(Slice)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR_1_PARAM(Subtraction)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Abs)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Dequantize)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Division)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Equal)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Floor)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Gather)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Greater)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Maximum)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Merge)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Minimum)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rsqrt)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
BOOST_AUTO_TEST_SUITE_END()
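
To make the consolidation concrete, this is roughly what TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition) expands to once the 1_PARAM/2_PARAM variants are gone (hand-expanded sketch; note that "name##Layer" sits inside a string literal, so it is not token-pasted and the same literal string is passed to both the visitor and the layer).

BOOST_AUTO_TEST_CASE(CheckAdditionLayerVisitorName)
{
    TestAdditionLayerVisitor visitor("name##Layer");
    armnn::Network net;
    armnn::IConnectableLayer* const layer = net.AddAdditionLayer("name##Layer");
    layer->Accept(visitor); // VisitAdditionLayer checks the layer pointer and the name
}

BOOST_AUTO_TEST_CASE(CheckAdditionLayerVisitorNameNullptr)
{
    TestAdditionLayerVisitor visitor;
    armnn::Network net;
    armnn::IConnectableLayer* const layer = net.AddAdditionLayer();
    layer->Accept(visitor); // a null name on both sides must still compare equal
}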
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index 4e0aa2f989..a772cb3283 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -9,51 +9,36 @@
namespace
{
-// Defines a visitor function with 1 required parameter to be used
-// with layers that do not have a descriptor
-#define VISIT_METHOD_1_PARAM(name) \
-void Visit##name##Layer(const armnn::IConnectableLayer* layer, const char* layerName = nullptr) override
-
-// Defines a visitor function with 2 required parameters to be used
-// with layers that have a descriptor
-#define VISIT_METHOD_2_PARAM(name) \
-void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
- const armnn::name##Descriptor&, \
- const char* layerName = nullptr) override
-
-#define TEST_LAYER_VISITOR(name, numVisitorParams) \
+#define DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(name) \
class Test##name##LayerVisitor : public armnn::TestLayerVisitor \
{ \
public: \
explicit Test##name##LayerVisitor(const char* layerName = nullptr) : armnn::TestLayerVisitor(layerName) {}; \
\
- VISIT_METHOD_##numVisitorParams##_PARAM(name) \
+ void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
+ const char* layerName = nullptr) override \
{ \
CheckLayerPointer(layer); \
CheckLayerName(layerName); \
} \
};
-// Defines a test layer visitor class for a layer, of a given name,
-// that does not require a descriptor
-#define TEST_LAYER_VISITOR_1_PARAM(name) TEST_LAYER_VISITOR(name, 1)
-
-// Defines a test layer visitor class for a layer, of a given name,
-// that requires a descriptor
-#define TEST_LAYER_VISITOR_2_PARAM(name) TEST_LAYER_VISITOR(name, 2)
-
} // anonymous namespace
-TEST_LAYER_VISITOR_1_PARAM(Addition)
-TEST_LAYER_VISITOR_2_PARAM(DepthToSpace)
-TEST_LAYER_VISITOR_1_PARAM(Division)
-TEST_LAYER_VISITOR_1_PARAM(Equal)
-TEST_LAYER_VISITOR_1_PARAM(Floor)
-TEST_LAYER_VISITOR_1_PARAM(Gather)
-TEST_LAYER_VISITOR_1_PARAM(Greater)
-TEST_LAYER_VISITOR_1_PARAM(Maximum)
-TEST_LAYER_VISITOR_1_PARAM(Minimum)
-TEST_LAYER_VISITOR_1_PARAM(Multiplication)
-TEST_LAYER_VISITOR_1_PARAM(Rsqrt)
-TEST_LAYER_VISITOR_2_PARAM(Slice)
-TEST_LAYER_VISITOR_1_PARAM(Subtraction)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Abs)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Addition)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Dequantize)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Division)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Equal)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Floor)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Gather)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Greater)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Maximum)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Merge)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Minimum)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Multiplication)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Prelu)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Quantize)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Rsqrt)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Subtraction)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Switch)
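
Likewise, DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Abs) expands to roughly the following class (hand-expanded for illustration; layers in this list carry no descriptor, so only the layer pointer and name are checked).

class TestAbsLayerVisitor : public armnn::TestLayerVisitor
{
public:
    explicit TestAbsLayerVisitor(const char* layerName = nullptr)
        : armnn::TestLayerVisitor(layerName) {}

    void VisitAbsLayer(const armnn::IConnectableLayer* layer,
                       const char* layerName = nullptr) override
    {
        CheckLayerPointer(layer);
        CheckLayerName(layerName);
    }
};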