From e886b518909880b370fddf43ff39296252d03909 Mon Sep 17 00:00:00 2001
From: Laurent Carlier
Date: Thu, 16 Apr 2020 12:02:05 +0100
Subject: Use X macro for the enum class LayerType

To improve the maintainability of the LayerType enum, use the X macro
technique (https://en.wikipedia.org/wiki/X_Macro).

With this approach, the preprocessor generates both the enum values and
the layer-name strings from the single list provided by the
LIST_OF_LAYER_TYPE macro.

Signed-off-by: Laurent Carlier
Change-Id: I3a6049abfb1e964fe0bf32aa4e26bec4e29a77de
---
 src/armnn/InternalTypes.cpp |  63 +--------------------
 src/armnn/InternalTypes.hpp | 135 +++++++++++++++++++++++---------------------
 2 files changed, 75 insertions(+), 123 deletions(-)

diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index a9435b29f5..aebc721be3 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -14,66 +14,9 @@ char const* GetLayerTypeAsCString(LayerType type)
 {
     switch (type)
     {
-        case LayerType::Activation: return "Activation";
-        case LayerType::Addition: return "Addition";
-        case LayerType::ArgMinMax: return "ArgMinMax";
-        case LayerType::BatchNormalization: return "BatchNormalization";
-        case LayerType::BatchToSpaceNd: return "BatchToSpaceNd";
-        case LayerType::Comparison: return "Comparison";
-        case LayerType::Concat: return "Concat";
-        case LayerType::Constant: return "Constant";
-        case LayerType::ConvertBf16ToFp32: return "ConvertBf16ToFp32";
-        case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32";
-        case LayerType::ConvertFp32ToBf16: return "ConvertFp32ToBf16";
-        case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16";
-        case LayerType::Convolution2d: return "Convolution2d";
-        case LayerType::Debug: return "Debug";
-        case LayerType::DepthToSpace: return "DepthToSpace";
-        case LayerType::DepthwiseConvolution2d: return "DepthwiseConvolution2d";
-        case LayerType::Dequantize: return "Dequantize";
-        case LayerType::DetectionPostProcess: return "DetectionPostProcess";
-        case LayerType::Division: return "Division";
-        case LayerType::ElementwiseUnary: return "ElementwiseUnary";
-        case LayerType::FakeQuantization: return "FakeQuantization";
-        case LayerType::Floor: return "Floor";
-        case LayerType::FullyConnected: return "FullyConnected";
-        case LayerType::Gather: return "Gather";
-        case LayerType::Input: return "Input";
-        case LayerType::InstanceNormalization: return "InstanceNormalization";
-        case LayerType::L2Normalization: return "L2Normalization";
-        case LayerType::LogSoftmax: return "LogSoftmax";
-        case LayerType::Lstm: return "Lstm";
-        case LayerType::Maximum: return "Maximum";
-        case LayerType::Mean: return "Mean";
-        case LayerType::MemCopy: return "MemCopy";
-        case LayerType::MemImport: return "MemImport";
-        case LayerType::Merge: return "Merge";
-        case LayerType::Minimum: return "Minimum";
-        case LayerType::Multiplication: return "Multiplication";
-        case LayerType::Normalization: return "Normalization";
-        case LayerType::Output: return "Output";
-        case LayerType::Pad: return "Pad";
-        case LayerType::Permute: return "Permute";
-        case LayerType::Pooling2d: return "Pooling2d";
-        case LayerType::PreCompiled: return "PreCompiled";
-        case LayerType::Prelu: return "Prelu";
-        case LayerType::Quantize: return "Quantize";
-        case LayerType::QLstm: return "QLstm";
-        case LayerType::QuantizedLstm: return "QuantizedLstm";
-        case LayerType::Reshape: return "Reshape";
-        case LayerType::Resize: return "Resize";
-        case LayerType::Slice: return "Slice";
-        case LayerType::Softmax: return "Softmax";
-        case LayerType::SpaceToBatchNd: return "SpaceToBatchNd";
-        case LayerType::SpaceToDepth: return "SpaceToDepth";
-        case LayerType::Splitter: return "Splitter";
-        case LayerType::Stack: return "Stack";
-        case LayerType::StandIn: return "StandIn";
-        case LayerType::StridedSlice: return "StridedSlice";
-        case LayerType::Subtraction: return "Subtraction";
-        case LayerType::Switch: return "Switch";
-        case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
-        case LayerType::Transpose: return "Transpose";
+#define X(name) case LayerType::name: return #name;
+        LIST_OF_LAYER_TYPE
+#undef X
         default:
             ARMNN_ASSERT_MSG(false, "Unknown layer type");
             return "Unknown";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index ee4a710d14..455cb60d5d 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -8,74 +8,83 @@
 
 #include
 
+
+/// This list uses X macro technique.
+/// See https://en.wikipedia.org/wiki/X_Macro for more info
+#define LIST_OF_LAYER_TYPE \
+    X(Activation) \
+    X(Addition) \
+    X(ArgMinMax) \
+    X(BatchNormalization) \
+    X(BatchToSpaceNd) \
+    X(Comparison) \
+    X(Concat) \
+    X(Constant) \
+    X(ConvertBf16ToFp32) \
+    X(ConvertFp16ToFp32) \
+    X(ConvertFp32ToBf16) \
+    X(ConvertFp32ToFp16) \
+    X(Convolution2d) \
+    X(Debug) \
+    X(DepthToSpace) \
+    X(DepthwiseConvolution2d) \
+    X(Dequantize) \
+    X(DetectionPostProcess) \
+    X(Division) \
+    X(ElementwiseUnary) \
+    X(FakeQuantization) \
+    X(Floor) \
+    X(FullyConnected) \
+    X(Gather) \
+    X(Input) \
+    X(InstanceNormalization) \
+    X(L2Normalization) \
+    X(LogSoftmax) \
+    X(Lstm) \
+    X(QLstm) \
+    X(Maximum) \
+    X(Mean) \
+    X(MemCopy) \
+    X(MemImport) \
+    X(Merge) \
+    X(Minimum) \
+    X(Multiplication) \
+    X(Normalization) \
+    X(Output) \
+    X(Pad) \
+    X(Permute) \
+    X(Pooling2d) \
+    X(PreCompiled) \
+    X(Prelu) \
+    X(Quantize) \
+    X(QuantizedLstm) \
+    X(Reshape) \
+    X(Resize) \
+    X(Slice) \
+    X(Softmax) \
+    X(SpaceToBatchNd) \
+    X(SpaceToDepth) \
+    X(Splitter) \
+    X(Stack) \
+    X(StandIn) \
+    X(StridedSlice) \
+    X(Subtraction) \
+    X(Switch) \
+    X(Transpose) \
+    X(TransposeConvolution2d)
+
+/// When adding a new layer, adapt also the LastLayer enum value in the
+/// enum class LayerType below
 namespace armnn
 {
 
 enum class LayerType
 {
-    FirstLayer,
-    Activation = FirstLayer,
-    Addition,
-    ArgMinMax,
-    BatchNormalization,
-    BatchToSpaceNd,
-    Comparison,
-    Concat,
-    Constant,
-    ConvertBf16ToFp32,
-    ConvertFp16ToFp32,
-    ConvertFp32ToBf16,
-    ConvertFp32ToFp16,
-    Convolution2d,
-    Debug,
-    DepthToSpace,
-    DepthwiseConvolution2d,
-    Dequantize,
-    DetectionPostProcess,
-    Division,
-    ElementwiseUnary,
-    FakeQuantization,
-    Floor,
-    FullyConnected,
-    Gather,
-    Input,
-    InstanceNormalization,
-    L2Normalization,
-    LogSoftmax,
-    Lstm,
-    Maximum,
-    Mean,
-    MemCopy,
-    MemImport,
-    Merge,
-    Minimum,
-    Multiplication,
-    Normalization,
-    Output,
-    Pad,
-    Permute,
-    Pooling2d,
-    PreCompiled,
-    Prelu,
-    Quantize,
-    QLstm,
-    QuantizedLstm,
-    Reshape,
-    Resize,
-    Slice,
-    Softmax,
-    SpaceToBatchNd,
-    SpaceToDepth,
-    Splitter,
-    Stack,
-    StandIn,
-    StridedSlice,
-    Subtraction,
-    Switch,
-    TransposeConvolution2d,
-    // Last layer goes here.
-    LastLayer,
-    Transpose = LastLayer
+#define X(name) name,
+    LIST_OF_LAYER_TYPE
+#undef X
+    FirstLayer = Activation,
+    LastLayer = TransposeConvolution2d
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
-- 
cgit v1.2.1
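
For readers unfamiliar with the pattern, the X macro technique applied in this
patch can be reduced to a minimal, self-contained C++ sketch. The FRUIT_LIST,
Fruit and GetFruitName names below are invented purely for illustration and are
not part of ArmNN; the ArmNN equivalents are the LIST_OF_LAYER_TYPE, LayerType
and GetLayerTypeAsCString shown in the diff above.

#include <cstdio>

// Single source of truth: every entry is funnelled through the X macro.
#define FRUIT_LIST \
    X(Apple)       \
    X(Banana)      \
    X(Cherry)

// Expansion 1: generate the enumerators from the list.
enum class Fruit
{
#define X(name) name,
    FRUIT_LIST
#undef X
};

// Expansion 2: generate the matching name strings from the same list.
const char* GetFruitName(Fruit fruit)
{
    switch (fruit)
    {
#define X(name) case Fruit::name: return #name;
        FRUIT_LIST
#undef X
    }
    return "Unknown";
}

int main()
{
    // Prints "Banana": the enum value and its string stay in sync because
    // both are generated from FRUIT_LIST.
    std::printf("%s\n", GetFruitName(Fruit::Banana));
    return 0;
}

Because the enumerators and the strings are expanded from the same list, adding
a new entry keeps both in step automatically, which is the maintainability
benefit the commit message claims for LIST_OF_LAYER_TYPE.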