From b454c5c65efb238c130b042ace390b2bc7f0bf75 Mon Sep 17 00:00:00 2001 From: Finn Williams Date: Tue, 9 Feb 2021 15:56:23 +0000 Subject: IVGCVSW-4893 Refactor ILayerVisitor using unified interface strategy. Signed-off-by: Jan Eilers Signed-off-by: Finn Williams Signed-off-by: Francis Murtagh Change-Id: Id7bc8255a8e3f9e5aac65d510bec8a559bf37246 --- Android.mk | 5 +- CMakeLists.txt | 17 +- include/armnn/Descriptors.hpp | 92 +- include/armnn/DescriptorsFwd.hpp | 1 + include/armnn/INetwork.hpp | 10 + include/armnn/IStrategy.hpp | 31 + include/armnn/Types.hpp | 83 + src/armnn/DynamicQuantizationStrategy.cpp | 276 ++ src/armnn/DynamicQuantizationStrategy.hpp | 59 + src/armnn/DynamicQuantizationVisitor.cpp | 364 -- src/armnn/DynamicQuantizationVisitor.hpp | 149 - src/armnn/InternalTypes.hpp | 84 - src/armnn/Layer.cpp | 6 + src/armnn/Layer.hpp | 10 +- src/armnn/Network.cpp | 8 + src/armnn/Network.hpp | 2 + src/armnn/NetworkQuantizer.cpp | 30 +- src/armnn/NetworkQuantizer.hpp | 4 +- src/armnn/NetworkQuantizerUtils.hpp | 11 + src/armnn/OverrideInputRangeVisitor.hpp | 51 + src/armnn/QuantizerStrategy.cpp | 519 +++ src/armnn/QuantizerStrategy.hpp | 63 + src/armnn/QuantizerVisitor.cpp | 589 ---- src/armnn/QuantizerVisitor.hpp | 231 -- src/armnn/StaticRangeStrategy.cpp | 193 ++ src/armnn/StaticRangeStrategy.hpp | 41 + src/armnn/StaticRangeVisitor.cpp | 270 -- src/armnn/StaticRangeVisitor.hpp | 120 - src/armnn/layers/BatchNormalizationLayer.cpp | 10 + src/armnn/layers/BatchNormalizationLayer.hpp | 2 + src/armnn/layers/ConstantLayer.cpp | 6 + src/armnn/layers/ConstantLayer.hpp | 2 + src/armnn/layers/Convolution2dLayer.cpp | 12 + src/armnn/layers/Convolution2dLayer.hpp | 2 + src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 12 + src/armnn/layers/DepthwiseConvolution2dLayer.hpp | 2 + src/armnn/layers/DetectionPostProcessLayer.cpp | 7 + src/armnn/layers/DetectionPostProcessLayer.hpp | 2 + src/armnn/layers/ElementwiseBaseLayer.cpp | 5 + src/armnn/layers/ElementwiseBaseLayer.hpp | 2 + src/armnn/layers/FakeQuantizationLayer.cpp | 6 + src/armnn/layers/FakeQuantizationLayer.hpp | 2 + src/armnn/layers/FullyConnectedLayer.cpp | 12 + src/armnn/layers/FullyConnectedLayer.hpp | 2 + src/armnn/layers/LayerWithParameters.hpp | 5 + src/armnn/layers/LstmLayer.cpp | 146 + src/armnn/layers/LstmLayer.hpp | 2 + src/armnn/layers/MemCopyLayer.cpp | 6 + src/armnn/layers/MemCopyLayer.hpp | 2 + src/armnn/layers/MemImportLayer.cpp | 6 + src/armnn/layers/MemImportLayer.hpp | 2 + src/armnn/layers/PreCompiledLayer.cpp | 6 + src/armnn/layers/PreCompiledLayer.hpp | 2 + src/armnn/layers/QLstmLayer.cpp | 126 + src/armnn/layers/QLstmLayer.hpp | 2 + src/armnn/layers/QuantizedLstmLayer.cpp | 87 + src/armnn/layers/QuantizedLstmLayer.hpp | 2 + src/armnn/layers/RankLayer.cpp | 5 + src/armnn/layers/RankLayer.hpp | 4 +- src/armnn/layers/TransposeConvolution2dLayer.cpp | 12 + src/armnn/layers/TransposeConvolution2dLayer.hpp | 2 + src/armnn/test/QuantizerTest.cpp | 2298 ++++--------- .../test/DeserializeReduceSum.cpp | 1 - src/armnnQuantizer/ArmNNQuantizerMain.cpp | 6 +- src/armnnQuantizer/QuantizationDataSet.cpp | 30 + src/armnnQuantizer/QuantizationDataSet.hpp | 16 + src/armnnSerializer/Serializer.cpp | 817 +++-- src/armnnSerializer/Serializer.hpp | 481 ++- .../test/ActivationSerializationTests.cpp | 19 +- .../test/ComparisonSerializationTests.cpp | 123 + .../test/LstmSerializationTests.cpp | 2199 +++++++++++++ src/armnnSerializer/test/SerializerTestUtils.cpp | 163 + src/armnnSerializer/test/SerializerTestUtils.hpp | 167 + 
src/armnnSerializer/test/SerializerTests.cpp | 3461 ++------------------ 74 files changed, 6490 insertions(+), 7113 deletions(-) create mode 100644 include/armnn/IStrategy.hpp create mode 100644 src/armnn/DynamicQuantizationStrategy.cpp create mode 100644 src/armnn/DynamicQuantizationStrategy.hpp delete mode 100644 src/armnn/DynamicQuantizationVisitor.cpp delete mode 100644 src/armnn/DynamicQuantizationVisitor.hpp create mode 100644 src/armnn/QuantizerStrategy.cpp create mode 100644 src/armnn/QuantizerStrategy.hpp delete mode 100644 src/armnn/QuantizerVisitor.cpp delete mode 100644 src/armnn/QuantizerVisitor.hpp create mode 100644 src/armnn/StaticRangeStrategy.cpp create mode 100644 src/armnn/StaticRangeStrategy.hpp delete mode 100644 src/armnn/StaticRangeVisitor.cpp delete mode 100644 src/armnn/StaticRangeVisitor.hpp create mode 100644 src/armnnSerializer/test/ComparisonSerializationTests.cpp create mode 100644 src/armnnSerializer/test/LstmSerializationTests.cpp create mode 100644 src/armnnSerializer/test/SerializerTestUtils.cpp create mode 100644 src/armnnSerializer/test/SerializerTestUtils.hpp diff --git a/Android.mk b/Android.mk index 6ada126893..aa89ff9292 100644 --- a/Android.mk +++ b/Android.mk @@ -426,7 +426,10 @@ LOCAL_SRC_FILES := \ src/profiling/test/TimelinePacketTests.cpp \ src/profiling/test/TimelineUtilityMethodsTests.cpp \ src/armnnSerializer/test/ActivationSerializationTests.cpp \ - src/armnnSerializer/test/SerializerTests.cpp + src/armnnSerializer/test/ComparisonSerializationTests.cpp \ + src/armnnSerializer/test/LstmSerializationTests.cpp \ + src/armnnSerializer/test/SerializerTests.cpp \ + src/armnnSerializer/test/SerializerTestUtils.cpp ifeq ($(ARMNN_REF_ENABLED),1) LOCAL_SRC_FILES += \ diff --git a/CMakeLists.txt b/CMakeLists.txt index c862c55687..4e75c28da0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -247,6 +247,7 @@ list(APPEND armnn_sources include/armnn/INetwork.hpp include/armnn/IProfiler.hpp include/armnn/IRuntime.hpp + include/armnn/IStrategy.hpp include/armnn/LayerSupport.hpp include/armnn/LayerVisitorBase.hpp include/armnn/Logging.hpp @@ -412,8 +413,8 @@ list(APPEND armnn_sources src/armnn/Descriptors.cpp src/armnn/DeviceSpec.hpp src/armnn/DllExport.hpp - src/armnn/DynamicQuantizationVisitor.cpp - src/armnn/DynamicQuantizationVisitor.hpp + src/armnn/DynamicQuantizationStrategy.cpp + src/armnn/DynamicQuantizationStrategy.hpp src/armnn/Exceptions.cpp src/armnn/ExecutionFrame.cpp src/armnn/ExecutionFrame.hpp @@ -456,8 +457,8 @@ list(APPEND armnn_sources src/armnn/ProfilingEvent.cpp src/armnn/ProfilingEvent.hpp src/armnn/Profiling.hpp - src/armnn/QuantizerVisitor.cpp - src/armnn/QuantizerVisitor.hpp + src/armnn/QuantizerStrategy.hpp + src/armnn/QuantizerStrategy.cpp src/armnn/Runtime.cpp src/armnn/Runtime.hpp src/armnn/RangeTracker.cpp @@ -465,8 +466,8 @@ list(APPEND armnn_sources src/armnn/ResolveType.hpp src/armnn/SerializeLayerParameters.cpp src/armnn/SerializeLayerParameters.hpp - src/armnn/StaticRangeVisitor.cpp - src/armnn/StaticRangeVisitor.hpp + src/armnn/StaticRangeStrategy.cpp + src/armnn/StaticRangeStrategy.hpp src/armnn/SubgraphView.cpp src/armnn/SubgraphView.hpp src/armnn/SubgraphViewSelector.cpp @@ -909,7 +910,11 @@ if(BUILD_UNIT_TESTS) enable_language(ASM) list(APPEND unittest_sources src/armnnSerializer/test/ActivationSerializationTests.cpp + src/armnnSerializer/test/ComparisonSerializationTests.cpp + src/armnnSerializer/test/LstmSerializationTests.cpp src/armnnSerializer/test/SerializerTests.cpp + 
src/armnnSerializer/test/SerializerTestUtils.cpp + src/armnnSerializer/test/SerializerTestUtils.hpp src/armnnDeserializer/test/DeserializeAbs.cpp src/armnnDeserializer/test/DeserializeActivation.cpp src/armnnDeserializer/test/DeserializeAdd.cpp diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index a8e68aa8c1..20511ab00f 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -9,6 +9,8 @@ #include #include +#include +#include #include "Tensor.hpp" #include "Types.hpp" @@ -16,8 +18,11 @@ namespace armnn { +/// Base class for all descriptors. +struct BaseDescriptor {}; + /// An ActivationDescriptor for the ActivationLayer. -struct ActivationDescriptor +struct ActivationDescriptor : BaseDescriptor { ActivationDescriptor() : m_Function(ActivationFunction::Sigmoid) @@ -48,7 +53,7 @@ struct ActivationDescriptor }; /// An ArgMinMaxDescriptor for ArgMinMaxLayer -struct ArgMinMaxDescriptor +struct ArgMinMaxDescriptor : BaseDescriptor { ArgMinMaxDescriptor() : m_Function(ArgMinMaxFunction::Min) @@ -70,7 +75,7 @@ struct ArgMinMaxDescriptor }; /// A ComparisonDescriptor for the ComparisonLayer -struct ComparisonDescriptor +struct ComparisonDescriptor : BaseDescriptor { ComparisonDescriptor() : ComparisonDescriptor(ComparisonOperation::Equal) @@ -90,7 +95,7 @@ struct ComparisonDescriptor }; /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer -struct ElementwiseUnaryDescriptor +struct ElementwiseUnaryDescriptor : BaseDescriptor { ElementwiseUnaryDescriptor() : ElementwiseUnaryDescriptor(UnaryOperation::Abs) @@ -110,7 +115,7 @@ struct ElementwiseUnaryDescriptor }; /// A PermuteDescriptor for the PermuteLayer. -struct PermuteDescriptor +struct PermuteDescriptor : BaseDescriptor { PermuteDescriptor() : m_DimMappings{} @@ -131,7 +136,7 @@ struct PermuteDescriptor }; /// A SoftmaxDescriptor for the SoftmaxLayer. -struct SoftmaxDescriptor +struct SoftmaxDescriptor : BaseDescriptor { SoftmaxDescriptor() : m_Beta(1.0f) @@ -155,7 +160,7 @@ using LogSoftmaxDescriptor = SoftmaxDescriptor; /// @brief An OriginsDescriptor for the ConcatLayer. /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc. -struct OriginsDescriptor +struct OriginsDescriptor : BaseDescriptor { OriginsDescriptor(); OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4); @@ -198,7 +203,7 @@ private: /// @brief A ViewsDescriptor for the SplitterLayer. /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc. -struct ViewsDescriptor +struct ViewsDescriptor : BaseDescriptor { ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4); ViewsDescriptor(const ViewsDescriptor& other); @@ -321,7 +326,7 @@ OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, } /// A Pooling2dDescriptor for the Pooling2dLayer. -struct Pooling2dDescriptor +struct Pooling2dDescriptor : BaseDescriptor { Pooling2dDescriptor() : m_PoolType(PoolingAlgorithm::Max) @@ -381,7 +386,7 @@ struct Pooling2dDescriptor }; /// A FullyConnectedDescriptor for the FullyConnectedLayer. 
-struct FullyConnectedDescriptor +struct FullyConnectedDescriptor : BaseDescriptor { FullyConnectedDescriptor() : m_BiasEnabled(false) @@ -400,7 +405,7 @@ struct FullyConnectedDescriptor }; /// A Convolution2dDescriptor for the Convolution2dLayer. -struct Convolution2dDescriptor +struct Convolution2dDescriptor : BaseDescriptor { Convolution2dDescriptor() : m_PadLeft(0) @@ -452,7 +457,7 @@ struct Convolution2dDescriptor }; /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer. -struct DepthwiseConvolution2dDescriptor +struct DepthwiseConvolution2dDescriptor : BaseDescriptor { DepthwiseConvolution2dDescriptor() : m_PadLeft(0) @@ -503,7 +508,7 @@ struct DepthwiseConvolution2dDescriptor DataLayout m_DataLayout; }; -struct DetectionPostProcessDescriptor +struct DetectionPostProcessDescriptor : BaseDescriptor { DetectionPostProcessDescriptor() : m_MaxDetections(0) @@ -559,7 +564,7 @@ struct DetectionPostProcessDescriptor }; /// A NormalizationDescriptor for the NormalizationLayer. -struct NormalizationDescriptor +struct NormalizationDescriptor : BaseDescriptor { NormalizationDescriptor() : m_NormChannelType(NormalizationAlgorithmChannel::Across) @@ -599,7 +604,7 @@ struct NormalizationDescriptor }; /// A L2NormalizationDescriptor for the L2NormalizationLayer. -struct L2NormalizationDescriptor +struct L2NormalizationDescriptor : BaseDescriptor { L2NormalizationDescriptor() : m_Eps(1e-12f) @@ -618,7 +623,7 @@ struct L2NormalizationDescriptor }; /// A BatchNormalizationDescriptor for the BatchNormalizationLayer. -struct BatchNormalizationDescriptor +struct BatchNormalizationDescriptor : BaseDescriptor { BatchNormalizationDescriptor() : m_Eps(0.0001f) @@ -637,7 +642,7 @@ struct BatchNormalizationDescriptor }; /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer -struct InstanceNormalizationDescriptor +struct InstanceNormalizationDescriptor : BaseDescriptor { InstanceNormalizationDescriptor() : m_Gamma(1.0f) @@ -665,7 +670,7 @@ struct InstanceNormalizationDescriptor }; /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer. -struct BatchToSpaceNdDescriptor +struct BatchToSpaceNdDescriptor : BaseDescriptor { BatchToSpaceNdDescriptor() : m_BlockShape({1, 1}) @@ -696,7 +701,7 @@ struct BatchToSpaceNdDescriptor }; /// A FakeQuantizationDescriptor for the FakeQuantizationLayer. -struct FakeQuantizationDescriptor +struct FakeQuantizationDescriptor : BaseDescriptor { FakeQuantizationDescriptor() : m_Min(-6.0f) @@ -715,7 +720,7 @@ struct FakeQuantizationDescriptor }; /// A FillDescriptor for the FillLayer -struct FillDescriptor +struct FillDescriptor : BaseDescriptor { FillDescriptor() : m_Value(0) @@ -734,7 +739,7 @@ struct FillDescriptor }; /// A GatherDescriptor for the GatherLayer. -struct GatherDescriptor +struct GatherDescriptor : BaseDescriptor { GatherDescriptor() : m_Axis(0) @@ -754,7 +759,7 @@ struct GatherDescriptor }; /// A ResizeBilinearDescriptor for the ResizeBilinearLayer. -struct ResizeBilinearDescriptor +struct ResizeBilinearDescriptor : BaseDescriptor { ResizeBilinearDescriptor() : m_TargetWidth(0) @@ -764,6 +769,15 @@ struct ResizeBilinearDescriptor , m_HalfPixelCenters(false) {} + bool operator ==(const ResizeBilinearDescriptor& rhs) const + { + return m_TargetWidth == rhs.m_TargetWidth && + m_TargetHeight == rhs.m_TargetHeight && + m_DataLayout == rhs.m_DataLayout && + m_AlignCorners == rhs.m_AlignCorners && + m_HalfPixelCenters == rhs.m_HalfPixelCenters; + } + /// Target width value. uint32_t m_TargetWidth; /// Target height value. 
@@ -777,7 +791,7 @@ struct ResizeBilinearDescriptor
 };
 
 /// A ResizeDescriptor for the ResizeLayer.
-struct ResizeDescriptor
+struct ResizeDescriptor : BaseDescriptor
 {
     ResizeDescriptor()
         : m_TargetWidth(0)
@@ -815,7 +829,7 @@ struct ResizeDescriptor
 
 /// A ReshapeDescriptor for the ReshapeLayer.
-struct ReshapeDescriptor
+struct ReshapeDescriptor : BaseDescriptor
 {
     ReshapeDescriptor()
         : m_TargetShape()
@@ -835,7 +849,7 @@ struct ReshapeDescriptor
 };
 
 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
-struct SpaceToBatchNdDescriptor
+struct SpaceToBatchNdDescriptor : BaseDescriptor
 {
     SpaceToBatchNdDescriptor()
         : m_BlockShape({1, 1})
@@ -867,7 +881,7 @@ struct SpaceToBatchNdDescriptor
 };
 
 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
-struct SpaceToDepthDescriptor
+struct SpaceToDepthDescriptor : BaseDescriptor
 {
     SpaceToDepthDescriptor()
         : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
@@ -894,7 +908,7 @@ struct SpaceToDepthDescriptor
 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
 
 /// An LstmDescriptor for the LstmLayer.
-struct LstmDescriptor
+struct LstmDescriptor : BaseDescriptor
 {
     LstmDescriptor()
         : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
@@ -934,7 +948,7 @@ struct LstmDescriptor
 };
 
 /// A MeanDescriptor for the MeanLayer.
-struct MeanDescriptor
+struct MeanDescriptor : BaseDescriptor
 {
     MeanDescriptor()
         : m_Axis()
@@ -958,7 +972,7 @@ struct MeanDescriptor
 };
 
 /// A PadDescriptor for the PadLayer.
-struct PadDescriptor
+struct PadDescriptor : BaseDescriptor
 {
     PadDescriptor() : m_PadValue(0)
     {}
@@ -984,7 +998,7 @@ struct PadDescriptor
 };
 
 /// A SliceDescriptor for the SliceLayer.
-struct SliceDescriptor
+struct SliceDescriptor : BaseDescriptor
 {
     SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
         : m_Begin(begin)
@@ -1007,7 +1021,7 @@ struct SliceDescriptor
 };
 
 /// A StackDescriptor for the StackLayer.
-struct StackDescriptor
+struct StackDescriptor : BaseDescriptor
 {
     StackDescriptor()
         : m_Axis(0)
@@ -1037,7 +1051,7 @@ struct StackDescriptor
 };
 
 /// A StandInDescriptor for the StandIn layer
-struct StandInDescriptor
+struct StandInDescriptor : BaseDescriptor
 {
     StandInDescriptor() {};
 
@@ -1059,7 +1073,7 @@ struct StandInDescriptor
 };
 
 /// A StridedSliceDescriptor for the StridedSliceLayer.
-struct StridedSliceDescriptor
+struct StridedSliceDescriptor : BaseDescriptor
 {
     StridedSliceDescriptor(const std::vector<int>& begin,
                            const std::vector<int>& end,
@@ -1123,7 +1137,7 @@ struct StridedSliceDescriptor
 };
 
 /// A PreCompiledDescriptor for the PreCompiledLayer.
-struct PreCompiledDescriptor
+struct PreCompiledDescriptor : BaseDescriptor
 {
     PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
         : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
@@ -1136,7 +1150,7 @@ struct PreCompiledDescriptor
 };
 
 /// A QLstmDescriptor for the QLstmLayer.
-struct QLstmDescriptor
+struct QLstmDescriptor : BaseDescriptor
 {
     QLstmDescriptor()
         : m_CellClip(0.0)
@@ -1196,7 +1210,7 @@ struct QLstmDescriptor
 };
 
 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
-struct TransposeConvolution2dDescriptor
+struct TransposeConvolution2dDescriptor : BaseDescriptor
 {
     TransposeConvolution2dDescriptor() :
         m_PadLeft(0),
@@ -1246,7 +1260,7 @@ struct TransposeConvolution2dDescriptor
 };
 
 /// A TransposeDescriptor for the TransposeLayer.
-struct TransposeDescriptor
+struct TransposeDescriptor : BaseDescriptor
 {
     TransposeDescriptor()
         : m_DimMappings{}
@@ -1267,7 +1281,7 @@ struct TransposeDescriptor
 };
 
 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
-struct LogicalBinaryDescriptor
+struct LogicalBinaryDescriptor : BaseDescriptor
 {
     LogicalBinaryDescriptor()
         : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
@@ -1287,7 +1301,7 @@ struct LogicalBinaryDescriptor
 };
 
 /// A ReduceDescriptor for the REDUCE operators.
-struct ReduceDescriptor
+struct ReduceDescriptor : BaseDescriptor
 {
     ReduceDescriptor()
         : m_KeepDims(false)
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 054ce51144..4e7082e88f 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -7,6 +7,7 @@
 namespace armnn
 {
+struct BaseDescriptor;
 
 struct ActivationDescriptor;
 struct ArgMinMaxDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index ca1b725d48..c667d9ce8b 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -91,8 +92,15 @@ public:
     /// Apply a visitor to this layer
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
+    /// Apply a strategy to this layer
+    virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
+
     /// Provide a hint for the optimizer as to which backend to prefer for this layer
     virtual void BackendSelectionHint(Optional<BackendId> backend) = 0;
+
+    /// Returns the armnn::LayerType of this layer
+    virtual LayerType GetType() const = 0;
+
 protected:
     /// Objects are not deletable via the handle
     ~IConnectableLayer() {}
@@ -600,6 +608,8 @@ public:
 
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
+    virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
+
 protected:
     ~INetwork() {}
 };
diff --git a/include/armnn/IStrategy.hpp b/include/armnn/IStrategy.hpp
new file mode 100644
index 0000000000..8d29565dcc
--- /dev/null
+++ b/include/armnn/IStrategy.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include
+#include
+
+namespace armnn
+{
+
+class IStrategy
+{
+protected:
+    IStrategy() {}
+    virtual ~IStrategy() {}
+
+public:
+    virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                 const armnn::BaseDescriptor& descriptor,
+                                 const std::vector<armnn::ConstTensor>& constants,
+                                 const char* name,
+                                 const armnn::LayerBindingId id = 0) = 0;
+
+    virtual void FinishStrategy() {}
+};
+
+} // namespace armnn
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 22004bd0a4..e1ff46b023 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -344,6 +344,89 @@ struct ProfilingStaticGuid : public ProfilingGuid
 } // namespace profiling
 
+/// This list uses X macro technique.
+/// See https://en.wikipedia.org/wiki/X_Macro for more info +#define LIST_OF_LAYER_TYPE \ + X(Activation) \ + X(Addition) \ + X(ArgMinMax) \ + X(BatchNormalization) \ + X(BatchToSpaceNd) \ + X(Comparison) \ + X(Concat) \ + X(Constant) \ + X(ConvertBf16ToFp32) \ + X(ConvertFp16ToFp32) \ + X(ConvertFp32ToBf16) \ + X(ConvertFp32ToFp16) \ + X(Convolution2d) \ + X(Debug) \ + X(DepthToSpace) \ + X(DepthwiseConvolution2d) \ + X(Dequantize) \ + X(DetectionPostProcess) \ + X(Division) \ + X(ElementwiseUnary) \ + X(FakeQuantization) \ + X(Fill) \ + X(Floor) \ + X(FullyConnected) \ + X(Gather) \ + X(Input) \ + X(InstanceNormalization) \ + X(L2Normalization) \ + X(LogicalBinary) \ + X(LogSoftmax) \ + X(Lstm) \ + X(QLstm) \ + X(Map) \ + X(Maximum) \ + X(Mean) \ + X(MemCopy) \ + X(MemImport) \ + X(Merge) \ + X(Minimum) \ + X(Multiplication) \ + X(Normalization) \ + X(Output) \ + X(Pad) \ + X(Permute) \ + X(Pooling2d) \ + X(PreCompiled) \ + X(Prelu) \ + X(Quantize) \ + X(QuantizedLstm) \ + X(Reshape) \ + X(Rank) \ + X(Resize) \ + X(Reduce) \ + X(Slice) \ + X(Softmax) \ + X(SpaceToBatchNd) \ + X(SpaceToDepth) \ + X(Splitter) \ + X(Stack) \ + X(StandIn) \ + X(StridedSlice) \ + X(Subtraction) \ + X(Switch) \ + X(Transpose) \ + X(TransposeConvolution2d) \ + X(Unmap) + +/// When adding a new layer, adapt also the LastLayer enum value in the +/// enum class LayerType below +enum class LayerType +{ +#define X(name) name, + LIST_OF_LAYER_TYPE +#undef X + FirstLayer = Activation, + LastLayer = Unmap +}; + +const char* GetLayerTypeAsCString(LayerType type); + } // namespace armnn diff --git a/src/armnn/DynamicQuantizationStrategy.cpp b/src/armnn/DynamicQuantizationStrategy.cpp new file mode 100644 index 0000000000..d354a0e441 --- /dev/null +++ b/src/armnn/DynamicQuantizationStrategy.cpp @@ -0,0 +1,276 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+
+#include "DynamicQuantizationStrategy.hpp"
+#include "NetworkUtils.hpp"
+
+#include
+#include
+#include
+#include
+
+#include
+
+namespace armnn
+{
+DynamicQuantizationStrategy::DynamicQuantizationStrategy(RangeTracker& rangeTracker, Graph& graph)
+    : m_RangeTracker(rangeTracker),
+      m_Graph(graph)
+{}
+
+void DynamicQuantizationStrategy::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max)
+{
+    m_RangeTracker.SetRange(layer, outputIdx, min, max);
+}
+
+void DynamicQuantizationStrategy::ForwardParentParameters(const IConnectableLayer* layer)
+{
+    for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+    {
+        const IOutputSlot *outputSlot = layer->GetInputSlot(i).GetConnection();
+        LayerGuid previousLayerId = outputSlot->GetOwningLayerGuid();
+        unsigned int ownerIndex = outputSlot->CalculateIndexOnOwner();
+        const auto parentRange = m_RangeTracker.GetRange(previousLayerId, ownerIndex);
+        SetRange(layer, i, parentRange.first, parentRange.second);
+    }
+}
+
+void DynamicQuantizationStrategy::AddToCalibratedLayers(const IConnectableLayer* layer)
+{
+    m_LayersToCalibrate.push_back(layer);
+}
+
+void DynamicQuantizationStrategy::AddToNonCalibratedLayers(const IConnectableLayer* layer)
+{
+    m_LayersNotToCalibrate.push_back(layer);
+}
+
+void DynamicQuantizationStrategy::FinishStrategy()
+{
+    for (const IConnectableLayer* layer : m_LayersToCalibrate)
+    {
+        std::vector<DebugLayer*> newDebugLayers = InsertDebugLayerAfter(
+            m_Graph, *PolymorphicDowncast<Layer*>(const_cast<IConnectableLayer*>(layer)));
+        // record them so we can take them out again efficiently afterward
+        m_DebugLayers.insert(std::end(m_DebugLayers), std::begin(newDebugLayers), std::end(newDebugLayers));
+    }
+}
+
+void DynamicQuantizationStrategy::RemoveDebugLayers()
+{
+    for (DebugLayer* debugLayer : m_DebugLayers)
+    {
+        OutputSlot& proceedingOutputSlot = *debugLayer->GetInputSlot(0).GetConnectedOutputSlot();
+        proceedingOutputSlot.Disconnect(debugLayer->GetInputSlot(0));
+
+        for (InputSlot* succeedingInputSlot : debugLayer->GetOutputSlot(0).GetConnections())
+        {
+            debugLayer->GetOutputSlot(0).Disconnect(*succeedingInputSlot);
+            proceedingOutputSlot.Connect(*succeedingInputSlot);
+        }
+        m_Graph.EraseLayer(debugLayer);
+    }
+    m_DebugLayers.clear();
+}
+
+void DynamicQuantizationStrategy::VisitNonCalibratedLayers() {
+    RemoveDebugLayers();
+    for (const IConnectableLayer* layer : m_LayersNotToCalibrate)
+    {
+        ForwardParentParameters(layer);
+    }
+}
+
+void DynamicQuantizationStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                                  const BaseDescriptor& descriptor,
+                                                  const std::vector<armnn::ConstTensor>& constants,
+                                                  const char* name,
+                                                  const armnn::LayerBindingId id)
+{
+    IgnoreUnused(name);
+    IgnoreUnused(id);
+    IgnoreUnused(descriptor);
+
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Activation :
+        {
+            const ActivationDescriptor& activationDescriptor = static_cast<const ActivationDescriptor&>(descriptor);
+            switch (activationDescriptor.m_Function)
+            {
+                // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
+                case ActivationFunction::Abs:
+                case ActivationFunction::Linear:
+                case ActivationFunction::ReLu:
+                case ActivationFunction::SoftReLu:
+                    SetRange(layer, 0, 0.f, 15.f);
+                    break;
+                case ActivationFunction::BoundedReLu:
+                    SetRange(layer, 0, 0.f, activationDescriptor.m_A);
+                    break;
+                case ActivationFunction::TanH:
+                    SetRange(layer, 0, -1.f, 1.f);
+                    break;
+                case ActivationFunction::LeakyReLu:
+                    SetRange(layer, 0, -5.f, 15.f);
+                    break;
+                default:
+                    SetRange(layer, 0, -15.f, 15.f);
+                    break;
+            }
+            break;
+        }
+        case armnn::LayerType::Addition :
+        {
+            SetRange(layer, 0, -20.f, 20.f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::ArgMinMax :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::BatchNormalization :
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Normalization:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Convolution2d:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d:
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::FullyConnected :
+        {
+            SetRange(layer, 0, -15.0f, 15.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Permute :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::SpaceToBatchNd :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Pooling2d :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Softmax :
+        {
+            SetRange(layer, 0, 0.f, 1.f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Constant :
+        {
+            if (constants[0].GetDataType() != DataType::Float32)
+            {
+                throw InvalidArgumentException("Quantization is supported only for FP32 tensors");
+            }
+
+            // Work out the range based on the input constants
+            unsigned int inputNumElements = constants[0].GetNumElements();
+            const float* inputData = reinterpret_cast<const float*>(constants[0].GetMemoryArea());
+
+            float min = std::numeric_limits<float>::max();
+            float max = std::numeric_limits<float>::lowest();
+
+            for (unsigned int i = 0; i < inputNumElements; i++)
+            {
+                const float inputValue = inputData[i];
+
+                min = std::min(min, inputValue);
+                max = std::max(max, inputValue);
+            }
+            SetRange(layer, 0, min, max);
+            break;
+        }
+        case armnn::LayerType::Concat :
+        {
+            float min = std::numeric_limits<float>::max();
+            float max = std::numeric_limits<float>::lowest();
+            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+            {
+                const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
+                LayerGuid layerId = outputSlot->GetOwningLayerGuid();
+                unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
+                RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
+                min = std::min(min, range.first);
+                max = std::max(max, range.second);
+            }
+            SetRange(layer, 0, min, max);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Reshape :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Splitter :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Resize :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::StridedSlice :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::BatchToSpaceNd :
+        {
+            AddToNonCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Input :
+        {
+            SetRange(layer, 0, -0.0f, 0.0f);
+            AddToCalibratedLayers(layer);
+            break;
+        }
+        case armnn::LayerType::Output :
+        {
+            AddToNonCalibratedLayers(layer);
+            m_OutputLayers.push_back(id);
+            break;
+        }
+        default:
+        {}
+    }
+}
+
+const std::vector<LayerBindingId>& DynamicQuantizationStrategy::GetOutputLayers()
+{
+    return m_OutputLayers;
+}
+
+} //namespace armnn
diff --git a/src/armnn/DynamicQuantizationStrategy.hpp b/src/armnn/DynamicQuantizationStrategy.hpp
new file mode 100644
index 0000000000..aa77a4b563
--- /dev/null
+++ b/src/armnn/DynamicQuantizationStrategy.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/LayerVisitorBase.hpp"
+#include "RangeTracker.hpp"
+#include "layers/DebugLayer.hpp"
+
+#include
+#include
+
+namespace armnn
+{
+
+/// Strategy class implementation to gather the TensorInfo for each LayerBindingId, for creation of
+/// ConstTensors for Refine.
+class DynamicQuantizationStrategy : public armnn::IStrategy
+{
+public:
+    DynamicQuantizationStrategy(RangeTracker& rangeTracker, Graph& graph);
+    ~DynamicQuantizationStrategy() = default;
+
+    virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                 const armnn::BaseDescriptor& descriptor,
+                                 const std::vector<armnn::ConstTensor>& constants,
+                                 const char* name,
+                                 const armnn::LayerBindingId id = 0) override;
+
+    const std::vector<LayerBindingId>& GetOutputLayers();
+    void VisitNonCalibratedLayers();
+    void FinishStrategy() override;
+
+private:
+    /// Set the range for an output slot on a layer
+    void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max);
+
+    void ForwardParentParameters(const IConnectableLayer* layer);
+
+    /// Mapping from a layer Guid to an array of ranges for outputs
+    RangeTracker& m_RangeTracker;
+
+    Graph& m_Graph;
+
+    std::vector<const IConnectableLayer*> m_LayersToCalibrate;
+    std::vector<const IConnectableLayer*> m_LayersNotToCalibrate;
+    std::vector<DebugLayer*> m_DebugLayers;
+
+    std::vector<LayerBindingId> m_OutputLayers;
+
+    void AddToCalibratedLayers(const IConnectableLayer* layer);
+    void AddToNonCalibratedLayers(const IConnectableLayer* layer);
+    void RemoveDebugLayers();
+};
+} //namespace armnn
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
deleted file mode 100644
index 02e7699eed..0000000000
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT -// - -#include "DynamicQuantizationVisitor.hpp" -#include "NetworkUtils.hpp" - -#include -#include -#include -#include - -#include - -namespace armnn -{ - -DynamicQuantizationVisitor::DynamicQuantizationVisitor(RangeTracker& rangeTracker, Graph& graph) - : m_RangeTracker(rangeTracker), - m_Graph(graph) -{} - -void DynamicQuantizationVisitor::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max) -{ - m_RangeTracker.SetRange(layer, outputIdx, min, max); -} - -void DynamicQuantizationVisitor::ForwardParentParameters(const IConnectableLayer* layer) -{ - for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i) - { - const IOutputSlot *outputSlot = layer->GetInputSlot(i).GetConnection(); - LayerGuid previousLayerId = outputSlot->GetOwningLayerGuid(); - unsigned int ownerIndex = outputSlot->CalculateIndexOnOwner(); - const auto parentRange = m_RangeTracker.GetRange(previousLayerId, ownerIndex); - SetRange(layer, i, parentRange.first, parentRange.second); - } -} - -void DynamicQuantizationVisitor::AddToCalibratedLayers(const IConnectableLayer* layer) -{ - m_LayersToCalibrate.push_back(layer); -} - -void DynamicQuantizationVisitor::AddToNonCalibratedLayers(const IConnectableLayer* layer) -{ - m_LayersNotToCalibrate.push_back(layer); -} - -void DynamicQuantizationVisitor::FinishVisit() -{ - for (const IConnectableLayer* layer : m_LayersToCalibrate) - { - std::vector newDebugLayers = InsertDebugLayerAfter( - m_Graph, *PolymorphicDowncast(const_cast(layer))); - // record them so we can take them out again efficiently afterward - m_DebugLayers.insert(std::end(m_DebugLayers), std::begin(newDebugLayers), std::end(newDebugLayers)); - } -} - -void DynamicQuantizationVisitor::RemoveDebugLayers() -{ - for (DebugLayer* debugLayer : m_DebugLayers) - { - OutputSlot& proceedingOutputSlot = *debugLayer->GetInputSlot(0).GetConnectedOutputSlot(); - proceedingOutputSlot.Disconnect(debugLayer->GetInputSlot(0)); - - for (InputSlot* succeedingInputSlot : debugLayer->GetOutputSlot(0).GetConnections()) - { - debugLayer->GetOutputSlot(0).Disconnect(*succeedingInputSlot); - proceedingOutputSlot.Connect(*succeedingInputSlot); - } - m_Graph.EraseLayer(debugLayer); - } - m_DebugLayers.clear(); -} - -void DynamicQuantizationVisitor::VisitNonCalibratedLayers() { - RemoveDebugLayers(); - for (const IConnectableLayer* layer : m_LayersNotToCalibrate) - { - ForwardParentParameters(layer); - } -} - -void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, - const char* name) -{ - IgnoreUnused(name); - SetRange(layer, 0, -20.f, 20.f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitAbsLayer(const IConnectableLayer* layer, - const char* name) -{ - IgnoreUnused(name); - SetRange(layer, 0, -20.f, 20.f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor& desc, - const char* name) -{ - IgnoreUnused(name); - IgnoreUnused(desc); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer, - const BatchNormalizationDescriptor& desc, - const ConstTensor& mean, - const ConstTensor& variance, - const ConstTensor& beta, - const ConstTensor& gamma, - const char* name) -{ - IgnoreUnused(desc); - IgnoreUnused(mean); - IgnoreUnused(variance); - IgnoreUnused(beta); - IgnoreUnused(gamma); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); - 
AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitNormalizationLayer(const IConnectableLayer* layer, - const NormalizationDescriptor& desc, - const char* name) -{ - IgnoreUnused(desc); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer, - const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - IgnoreUnused(convolution2dDescriptor); - IgnoreUnused(weights); - IgnoreUnused(biases); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, - const DepthwiseConvolution2dDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - IgnoreUnused(desc); - IgnoreUnused(weights); - IgnoreUnused(biases); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& activationDescriptor, - const char* name) -{ - IgnoreUnused(name, activationDescriptor); - switch (activationDescriptor.m_Function) - { - // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu - case ActivationFunction::Abs: - case ActivationFunction::Linear: - case ActivationFunction::ReLu: - case ActivationFunction::SoftReLu: - SetRange(layer, 0, 0.f, 15.f); - break; - case ActivationFunction::BoundedReLu: - SetRange(layer, 0, 0.f, activationDescriptor.m_A); - break; - case ActivationFunction::TanH: - SetRange(layer, 0, -1.f, 1.f); - break; - case ActivationFunction::LeakyReLu: - SetRange(layer, 0, -5.f, 15.f); - break; - default: - SetRange(layer, 0, -15.f, 15.f); - break; - } - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer, - const FullyConnectedDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char *name) -{ - IgnoreUnused(desc); - IgnoreUnused(weights); - IgnoreUnused(biases); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitPermuteLayer(const IConnectableLayer* layer, - const PermuteDescriptor& permuteDescriptor, - const char* name) -{ - IgnoreUnused(permuteDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, - const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, - const char* name) -{ - IgnoreUnused(spaceToBatchNdDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitPooling2dLayer(const IConnectableLayer* layer, - const Pooling2dDescriptor& pooling2dDescriptor, - const char* name) -{ - IgnoreUnused(pooling2dDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer, - const SoftmaxDescriptor& softmaxDescriptor, - const char* name) -{ - IgnoreUnused(softmaxDescriptor); - IgnoreUnused(name); - SetRange(layer, 0, 0.f, 1.f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitConstantLayer(const IConnectableLayer* layer, - const ConstTensor& input, - const char* name) -{ - IgnoreUnused(name); - 
- if (input.GetDataType() != DataType::Float32) - { - throw InvalidArgumentException("Quantization is supported only for FP32 tensors"); - } - - // Work out the range based on the input constants - unsigned int inputNumElements = input.GetNumElements(); - const float* inputData = reinterpret_cast(input.GetMemoryArea()); - - float min = std::numeric_limits::max(); - float max = std::numeric_limits::lowest(); - - for (unsigned int i = 0; i < inputNumElements; i++) - { - const float inputValue = inputData[i]; - - min = std::min(min, inputValue); - max = std::max(max, inputValue); - } - SetRange(layer, 0, min, max); -} - -void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer, - const ConcatDescriptor& originsDescriptor, - const char* name) -{ - IgnoreUnused(name); - IgnoreUnused(originsDescriptor); - float min = std::numeric_limits::max(); - float max = std::numeric_limits::lowest(); - for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i) - { - const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection(); - LayerGuid layerId = outputSlot->GetOwningLayerGuid(); - unsigned int slotIndex = outputSlot->CalculateIndexOnOwner(); - RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex); - min = std::min(min, range.first); - max = std::max(max, range.second); - } - SetRange(layer, 0, min, max); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name) -{ - IgnoreUnused(reshapeDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& splitterDescriptor, - const char* name) -{ - IgnoreUnused(splitterDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer, - const ResizeBilinearDescriptor& resizeDesc, - const char* name) -{ - IgnoreUnused(resizeDesc); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& stridedSliceDescriptor, - const char* name) -{ - IgnoreUnused(stridedSliceDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name) -{ - IgnoreUnused(batchToSpaceNdDescriptor); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name) -{ - IgnoreUnused(id); - IgnoreUnused(name); - SetRange(layer, 0, -0.0f, 0.0f); - AddToCalibratedLayers(layer); -} - -void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name) -{ - IgnoreUnused(id); - IgnoreUnused(name); - AddToNonCalibratedLayers(layer); - m_OutputLayers.push_back(id); -} - -const std::vector& DynamicQuantizationVisitor::GetOutputLayers() -{ - return m_OutputLayers; -} - -} //namespace armnn diff --git a/src/armnn/DynamicQuantizationVisitor.hpp b/src/armnn/DynamicQuantizationVisitor.hpp deleted file mode 100644 index 358e47187e..0000000000 --- a/src/armnn/DynamicQuantizationVisitor.hpp +++ /dev/null @@ -1,149 +0,0 @@ -// -// Copyright © 
2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "armnn/LayerVisitorBase.hpp" -#include "RangeTracker.hpp" -#include "layers/DebugLayer.hpp" - -#include -#include - -namespace armnn -{ - -/// Visitor class to establish min/max ranges based on the type of the layer -class DynamicQuantizationVisitor : public LayerVisitorBase -{ -public: - DynamicQuantizationVisitor(RangeTracker& rangeTracker, Graph& graph); - ~DynamicQuantizationVisitor() = default; - - /// Functions to set the Range on a per-layer-type basis - void VisitAbsLayer(const IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitAdditionLayer(const IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor& desc, - const char* name = nullptr) override; - - void VisitNormalizationLayer(const IConnectableLayer* layer, - const NormalizationDescriptor& desc, - const char* name = nullptr) override ; - - void VisitBatchNormalizationLayer(const IConnectableLayer* layer, - const BatchNormalizationDescriptor& desc, - const ConstTensor& mean, - const ConstTensor& variance, - const ConstTensor& beta, - const ConstTensor& gamma, - const char* name = nullptr) override; - - void VisitConvolution2dLayer(const IConnectableLayer* layer, - const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, - const DepthwiseConvolution2dDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& activationDescriptor, - const char* name = nullptr) override; - - void VisitFullyConnectedLayer(const IConnectableLayer *layer, - const FullyConnectedDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char *name) override; - - void VisitPermuteLayer(const IConnectableLayer* layer, - const PermuteDescriptor& permuteDescriptor, - const char* name) override; - - void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, - const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, - const char* name = nullptr) override; - - void VisitPooling2dLayer(const IConnectableLayer* layer, - const Pooling2dDescriptor& pooling2dDescriptor, - const char* name) override; - - void VisitSoftmaxLayer(const IConnectableLayer* layer, - const SoftmaxDescriptor& softmaxDescriptor, - const char* name = nullptr) override; - - void VisitConcatLayer(const IConnectableLayer* layer, - const ConcatDescriptor& originsDescriptor, - const char* name = nullptr) override; - - void VisitConstantLayer(const IConnectableLayer* layer, - const ConstTensor& input, - const char* name = nullptr) override; - - void VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name = nullptr) override; - - void VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& splitterDescriptor, - const char* name = nullptr) override; - - void VisitResizeBilinearLayer(const IConnectableLayer* layer, - const ResizeBilinearDescriptor& resizeDesc, - const char* name = nullptr) override; - - void VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& stridedSliceDescriptor, - const char* name = nullptr) override; - - 
void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name = nullptr) override; - - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override; - - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override; - - void FinishVisit() override; - void VisitNonCalibratedLayers(); - - const std::vector& GetOutputLayers(); - -private: - /// Set the range for an output slot on a layer - void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max); - - void ForwardParentParameters(const IConnectableLayer* layer); - - /// Mapping from a layer Guid to an array of ranges for outputs - RangeTracker& m_RangeTracker; - - Graph& m_Graph; - - std::vector m_LayersToCalibrate; - std::vector m_LayersNotToCalibrate; - std::vector m_DebugLayers; - - std::vector m_OutputLayers; - - void AddToCalibratedLayers(const IConnectableLayer* layer); - void AddToNonCalibratedLayers(const IConnectableLayer* layer); - void RemoveDebugLayers(); -}; - -} //namespace armnn diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 6e6559137c..9850520116 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -8,93 +8,9 @@ #include - -/// This list uses X macro technique. -/// See https://en.wikipedia.org/wiki/X_Macro for more info -#define LIST_OF_LAYER_TYPE \ - X(Activation) \ - X(Addition) \ - X(ArgMinMax) \ - X(BatchNormalization) \ - X(BatchToSpaceNd) \ - X(Comparison) \ - X(Concat) \ - X(Constant) \ - X(ConvertBf16ToFp32) \ - X(ConvertFp16ToFp32) \ - X(ConvertFp32ToBf16) \ - X(ConvertFp32ToFp16) \ - X(Convolution2d) \ - X(Debug) \ - X(DepthToSpace) \ - X(DepthwiseConvolution2d) \ - X(Dequantize) \ - X(DetectionPostProcess) \ - X(Division) \ - X(ElementwiseUnary) \ - X(FakeQuantization) \ - X(Fill) \ - X(Floor) \ - X(FullyConnected) \ - X(Gather) \ - X(Input) \ - X(InstanceNormalization) \ - X(L2Normalization) \ - X(LogicalBinary) \ - X(LogSoftmax) \ - X(Lstm) \ - X(QLstm) \ - X(Map) \ - X(Maximum) \ - X(Mean) \ - X(MemCopy) \ - X(MemImport) \ - X(Merge) \ - X(Minimum) \ - X(Multiplication) \ - X(Normalization) \ - X(Output) \ - X(Pad) \ - X(Permute) \ - X(Pooling2d) \ - X(PreCompiled) \ - X(Prelu) \ - X(Quantize) \ - X(QuantizedLstm) \ - X(Reshape) \ - X(Rank) \ - X(Reduce) \ - X(Resize) \ - X(Slice) \ - X(Softmax) \ - X(SpaceToBatchNd) \ - X(SpaceToDepth) \ - X(Splitter) \ - X(Stack) \ - X(StandIn) \ - X(StridedSlice) \ - X(Subtraction) \ - X(Switch) \ - X(Transpose) \ - X(TransposeConvolution2d) \ - X(Unmap) - -/// When adding a new layer, adapt also the LastLayer enum value in the -/// enum class LayerType below namespace armnn { -enum class LayerType -{ -#define X(name) name, - LIST_OF_LAYER_TYPE -#undef X - FirstLayer = Activation, - LastLayer = Unmap -}; - -const char* GetLayerTypeAsCString(LayerType type); - using Coordinates = std::array; using Dimensions = std::array; diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp index 9a526a0943..c9733e822b 100644 --- a/src/armnn/Layer.cpp +++ b/src/armnn/Layer.cpp @@ -473,4 +473,10 @@ void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const } } +// default implementation of ExecuteStrategy +void Layer::ExecuteStrategy(IStrategy& strategy) const +{ + strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName()); +} + } // namespace armnn diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp 
index ef0f8c3297..2f5cacc3ce 100644 --- a/src/armnn/Layer.hpp +++ b/src/armnn/Layer.hpp @@ -214,6 +214,9 @@ public: Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name); Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name); + void ExecuteStrategy(IStrategy& strategy) const override; + + const std::string& GetNameStr() const { return m_LayerName; @@ -259,7 +262,7 @@ public: void ResetPriority() const; LayerPriority GetPriority() const; - LayerType GetType() const { return m_Type; } + LayerType GetType() const override { return m_Type; } DataType GetDataType() const; @@ -440,6 +443,11 @@ public: LayerBindingId GetBindingId() const { return m_Id; }; + void ExecuteStrategy(IStrategy& strategy) const override + { + strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName(), GetBindingId()); + } + protected: ~BindableLayer() = default; diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index f8b0675f0d..bf7a056f6e 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -2021,6 +2021,14 @@ void Network::Accept(ILayerVisitor& visitor) const }; } +void Network::ExecuteStrategy(IStrategy& strategy) const +{ + for (auto layer : GetGraph()) + { + layer->ExecuteStrategy(strategy); + }; +} + OptimizedNetwork::OptimizedNetwork(std::unique_ptr graph) : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()) { diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 1205bd847e..cffade5a21 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -258,6 +258,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + private: IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor, const ConstTensor& weights, diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp index e6becee96f..eed3f41bdc 100644 --- a/src/armnn/NetworkQuantizer.cpp +++ b/src/armnn/NetworkQuantizer.cpp @@ -8,9 +8,9 @@ #include "Graph.hpp" #include "Layer.hpp" #include "Network.hpp" -#include "DynamicQuantizationVisitor.hpp" -#include "StaticRangeVisitor.hpp" -#include "QuantizerVisitor.hpp" +#include "DynamicQuantizationStrategy.hpp" +#include "StaticRangeStrategy.hpp" +#include "QuantizerStrategy.hpp" #include "OverrideInputRangeVisitor.hpp" #include @@ -60,9 +60,9 @@ void NetworkQuantizer::OverrideInputRange(LayerBindingId layerId, float min, flo void NetworkQuantizer::Refine(const InputTensors& inputTensors) { - // The first time Refine is called the m_Runtime and the DynamicQuantizationVisitor + // The first time Refine is called the m_Runtime and the DynamicQuantizationStrategy // will not have been created. 
Need to get the environment set up, Runtime loaded,
-    // DynamicQuantizationVisitor created and run over the network to initialise itself
+    // DynamicQuantizationStrategy created and run over the network to initialise itself
     // and the RangeTracker, the Debug callback registered, and an initial inference
     // done to set up the first min/max values
     if (!m_Runtime)
@@ -71,15 +71,15 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors)
         m_Ranges.SetDynamicMode(true);
         const Graph& cGraph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort();
 
-        // need to insert Debug layers in the DynamicQuantizationVisitor
+        // need to insert Debug layers in the DynamicQuantizationStrategy
         Graph& graph = const_cast<Graph&>(cGraph);
 
         // Initialize RangeTracker to the default values for each layer.
         // The default values are overwritten by the min/max that is
         // recorded during the first dataset min/max calibration. This
         // initialisation is only required for the first call of Refine().
-        m_DynamicQuantizationVisitor = DynamicQuantizationVisitor(m_Ranges, graph);
-        VisitLayers(cGraph, m_DynamicQuantizationVisitor.value());
+        m_DynamicQuantizationStrategy = DynamicQuantizationStrategy(m_Ranges, graph);
+        ApplyStrategyToLayers(cGraph, m_DynamicQuantizationStrategy.value());
 
         IRuntime::CreationOptions options;
         m_Runtime = IRuntime::Create(options);
@@ -119,7 +119,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors)
     // Create output tensor for EnqueueWorkload
     std::vector outputBindings;
-    auto outputLayers = m_DynamicQuantizationVisitor.value().GetOutputLayers();
+    auto outputLayers = m_DynamicQuantizationStrategy.value().GetOutputLayers();
     std::vector outputVectors;
     for (auto outputLayerBindingId : outputLayers)
     {
@@ -144,16 +144,16 @@ INetworkPtr NetworkQuantizer::ExportNetwork()
     if (!m_Runtime)
     {
         m_Ranges.SetDynamicMode(false);
-        StaticRangeVisitor rangeVisitor(m_Ranges);
-        VisitLayers(graph, rangeVisitor);
+        StaticRangeStrategy rangeStrategy(m_Ranges);
+        ApplyStrategyToLayers(graph, rangeStrategy);
     }
     else
    {
         // Set min/max range of non-calibrated layers to parent layer's range
-        m_DynamicQuantizationVisitor.value().VisitNonCalibratedLayers();
+        m_DynamicQuantizationStrategy.value().VisitNonCalibratedLayers();
         // now tear down the runtime and the dynamic quantization strategy.
         m_Runtime.reset(nullptr);
-        m_DynamicQuantizationVisitor = EmptyOptional();
+        m_DynamicQuantizationStrategy = EmptyOptional();
         m_RefineCount = 0;
     }
@@ -177,8 +177,8 @@ INetworkPtr NetworkQuantizer::ExportNetwork()
         throw InvalidArgumentException("Unsupported quantization target");
     }
 
-    QuantizerVisitor quantizerVisitor(m_Ranges, quantizationScheme.get(), m_Options.m_PreserveType);
-    VisitLayers(graph, quantizerVisitor);
+    QuantizerStrategy quantizerStrategy(m_Ranges, quantizationScheme.get(), m_Options.m_PreserveType);
+    ApplyStrategyToLayers(graph, quantizerStrategy);
 
     // clear the ranges
     m_Ranges.Reset();
diff --git a/src/armnn/NetworkQuantizer.hpp b/src/armnn/NetworkQuantizer.hpp
index d384bdc545..a07ac8827e 100644
--- a/src/armnn/NetworkQuantizer.hpp
+++ b/src/armnn/NetworkQuantizer.hpp
@@ -11,7 +11,7 @@
 #include
 #include
 
-#include "DynamicQuantizationVisitor.hpp"
+#include "DynamicQuantizationStrategy.hpp"
 #include "RangeTracker.hpp"
 
 namespace armnn
@@ -44,7 +44,7 @@ private:
     // the runtime between invocations of the Refine method.
 IRuntimePtr m_Runtime;
-    Optional<DynamicQuantizationVisitor> m_DynamicQuantizationVisitor;
+    Optional<DynamicQuantizationStrategy> m_DynamicQuantizationStrategy;
 
     // counts the number of times refine is called
     unsigned int m_RefineCount;
diff --git a/src/armnn/NetworkQuantizerUtils.hpp b/src/armnn/NetworkQuantizerUtils.hpp
index dd274f9e35..5497e1b898 100644
--- a/src/armnn/NetworkQuantizerUtils.hpp
+++ b/src/armnn/NetworkQuantizerUtils.hpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -56,4 +57,14 @@ void VisitLayers(const LayerContainer& layerContainer, ILayerVisitor& visitor)
     visitor.FinishVisit();
 }
 
+template<typename LayerContainer>
+void ApplyStrategyToLayers(const LayerContainer& layerContainer, IStrategy& strategy)
+{
+    for (auto layer : layerContainer)
+    {
+        layer->ExecuteStrategy(strategy);
+    }
+    strategy.FinishStrategy();
+}
+
 } // namespace armnn
diff --git a/src/armnn/OverrideInputRangeVisitor.hpp b/src/armnn/OverrideInputRangeVisitor.hpp
index 511c851bef..196a3aab1d 100644
--- a/src/armnn/OverrideInputRangeVisitor.hpp
+++ b/src/armnn/OverrideInputRangeVisitor.hpp
@@ -13,6 +13,57 @@
 namespace armnn
 {
+class OverrideInputRangeStrategy : public IStrategy
+{
+private:
+    using MinMaxRange = RangeTracker::MinMaxRange;
+
+public:
+    OverrideInputRangeStrategy(RangeTracker& ranges,
+                               LayerBindingId layerId,
+                               const MinMaxRange& minMaxRange)
+        : m_Ranges(ranges)
+        , m_LayerId(layerId)
+        , m_MinMaxRange(minMaxRange) {}
+
+    ~OverrideInputRangeStrategy() = default;
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override
+    {
+        IgnoreUnused(name, constants, descriptor);
+
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input :
+            {
+                if (m_LayerId == id)
+                {
+                    m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
+                }
+                break;
+            }
+            default:
+            {
+                // Only Input layers can have their range overridden; nothing to do here
+                break;
+            }
+        }
+    }
+
+private:
+    /// Mapping from a layer Guid to an array of ranges for outputs
+    RangeTracker& m_Ranges;
+
+    /// The id of the input layer of which to override the input range
+    LayerBindingId m_LayerId;
+
+    /// The new input range to be applied to the input layer
+    MinMaxRange m_MinMaxRange;
+};
+
 /// Visitor object for overriding the input range of the quantized input layers in a network
 class OverrideInputRangeVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
diff --git a/src/armnn/QuantizerStrategy.cpp b/src/armnn/QuantizerStrategy.cpp
new file mode 100644
index 0000000000..df20749072
--- /dev/null
+++ b/src/armnn/QuantizerStrategy.cpp
@@ -0,0 +1,519 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "QuantizerStrategy.hpp"
+#include "armnn/utility/PolymorphicDowncast.hpp"
+
+namespace armnn
+{
+
+QuantizerStrategy::QuantizerStrategy(const RangeTracker& rangeTracker,
+                                     const IQuantizationScheme* quantizationScheme,
+                                     bool preserveType)
+    : m_Ranges(rangeTracker)
+    , m_QuantizedNetwork(INetwork::Create())
+    , m_QuantizationScheme(quantizationScheme)
+    , m_PreserveType(preserveType)
+{
+}
+
+void QuantizerStrategy::SetQuantizedInputConnections(const IConnectableLayer* srcLayer,
+                                                     IConnectableLayer* quantizedLayer)
+{
+    ARMNN_ASSERT(srcLayer);
+    for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++)
+    {
+        const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i);
+        const InputSlot* inputSlot = static_cast<const InputSlot*>(&srcInputSlot);
+        ARMNN_ASSERT(inputSlot);
+        const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
+
+        ARMNN_ASSERT(outputSlot);
+        unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
+        Layer& layerToFind = outputSlot->GetOwningLayer();
+
+        auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid());
+        if (found == m_OriginalToQuantizedGuidMap.end())
+        {
+            // Error in graph traversal order
+            ARMNN_ASSERT_MSG(false, "Error in graph traversal");
+            return;
+        }
+
+        // Connect the slots in the quantized model
+        IConnectableLayer* prevQuantizedLayer = m_QuantizedGuidToLayerMap[found->second];
+        IInputSlot& newInputSlot = quantizedLayer->GetInputSlot(i);
+        IOutputSlot& newOutputSlot = prevQuantizedLayer->GetOutputSlot(slotIdx);
+        newOutputSlot.Connect(newInputSlot);
+        TensorInfo info(outputSlot->GetTensorInfo());
+
+        // Only try to set quantization params on tensors that can be quantized
+        if (inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Boolean &&
+            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed32 &&
+            inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed64)
+        {
+            // Fetch the min/max ranges that were computed earlier
+            auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
+            OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
+            info.SetDataType(m_QuantizationScheme->GetDataType());
+            info.SetQuantizationOffset(qParams.second);
+            info.SetQuantizationScale(qParams.first);
+        }
+        newOutputSlot.SetTensorInfo(info);
+    }
+}
+
+ConstTensor QuantizerStrategy::CreateQuantizedBias(const IConnectableLayer* srcLayer,
+                                                   const ConstTensor& weights,
+                                                   const Optional<ConstTensor>& biases,
+                                                   std::vector<int32_t>& backing)
+{
+    ARMNN_ASSERT(srcLayer);
+    const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0);
+    auto inputSlot = static_cast<const InputSlot*>(&srcInputSlot);
+    ARMNN_ASSERT(inputSlot);
+    const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
+
+    ARMNN_ASSERT(outputSlot);
+    unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
+    Layer& layerToFind = outputSlot->GetOwningLayer();
+
+    auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid());
+    if (found == m_OriginalToQuantizedGuidMap.end())
+    {
+        // Error in graph traversal order
+        ARMNN_ASSERT_MSG(false, "Error in graph traversal");
+        return biases.value();
+    }
+
+    // Fetch the min/max ranges that were computed earlier
+    auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx);
+    OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second);
+
+    // Get the quantization scale based on input and weight scale
+    float scale = qParams.first * weights.GetInfo().GetQuantizationScale();
+
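+    // A worked example of this bias-scale convention (illustrative values only, not from the code):
+    // with an input scale of 0.5f and a weight scale of 0.1f, the bias scale becomes
+    // 0.5f * 0.1f = 0.05f, so an FP32 bias of 1.0f is stored as round(1.0f / 0.05f) = 20
+    // in the Signed32 bias tensor set up below.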
+ // Set up quantized bias tensor info and allocate space + TensorInfo qInfo(biases.value().GetInfo().GetShape(), DataType::Signed32, scale, 0); + backing.resize(biases.value().GetInfo().GetNumElements()); + + // Convert values to int32 + for (size_t i = 0; i < backing.size(); ++i) + { + float fp32Value = static_cast(biases.value().GetMemoryArea())[i]; + backing[i] = armnn::numeric_cast(fp32Value * ( 1 / scale )); + } + + return ConstTensor(qInfo, backing); +} + +void QuantizerStrategy::RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer) +{ + m_OriginalToQuantizedGuidMap.insert(std::make_pair(srcLayer->GetGuid(), quantizedLayer->GetGuid())); + m_QuantizedGuidToLayerMap.insert(std::make_pair(quantizedLayer->GetGuid(), quantizedLayer)); +} + +void QuantizerStrategy::ExecuteStrategy(const armnn::IConnectableLayer *layer, + const BaseDescriptor& descriptor, + const std::vector &constants, + const char *name, + const armnn::LayerBindingId id) +{ + IgnoreUnused(id); + + IConnectableLayer* newLayer; + + switch (layer->GetType()) + { + case armnn::LayerType::Addition : + { + newLayer = m_QuantizedNetwork->AddAdditionLayer(name); + break; + } + case armnn::LayerType::Activation : + { + const ActivationDescriptor& activationDescriptor = static_cast(descriptor); + newLayer = m_QuantizedNetwork->AddActivationLayer(activationDescriptor, name); + break; + } + case armnn::LayerType::ArgMinMax : + { + ArgMinMaxDescriptor argMinMaxDescriptor = static_cast(descriptor); + newLayer = m_QuantizedNetwork->AddArgMinMaxLayer(argMinMaxDescriptor, name); + break; + } + case armnn::LayerType::BatchNormalization : + { + + BatchNormalizationDescriptor batchNormalizationDescriptor = + static_cast(descriptor); + std::vector meanBacking; + ConstTensor qMean = CreateQuantizedConst(constants[0], meanBacking); + + std::vector varianceBacking; + ConstTensor qVariance = CreateQuantizedConst(constants[1], varianceBacking); + + std::vector betaBacking; + ConstTensor qBeta = CreateQuantizedConst(constants[2], betaBacking); + + std::vector gammaBacking; + ConstTensor qGamma = CreateQuantizedConst(constants[3], gammaBacking); + + newLayer = m_QuantizedNetwork->AddBatchNormalizationLayer(batchNormalizationDescriptor, + qMean, + qVariance, + qBeta, + qGamma, + name); + break; + } + case armnn::LayerType::BatchToSpaceNd : + { + BatchToSpaceNdDescriptor batchToSpaceNdDescriptor = + static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name); + break; + } + case armnn::LayerType::Comparison : + { + ComparisonDescriptor comparisonDescriptor =static_cast(descriptor); + newLayer = m_QuantizedNetwork->AddComparisonLayer(comparisonDescriptor, name); + break; + } + case armnn::LayerType::Concat : + { + OriginsDescriptor originsDescriptor = static_cast(descriptor); + newLayer = m_QuantizedNetwork->AddConcatLayer(originsDescriptor, name); + break; + } + case armnn::LayerType::Constant : + { + std::vector inputBacking; + ConstTensor qInput = CreateQuantizedConst(constants[0], inputBacking); + + newLayer = m_QuantizedNetwork->AddConstantLayer(qInput, name); + break; + } + case armnn::LayerType::Convolution2d : + { + const armnn::Optional biases = constants.size() == 1 ? 
+ armnn::Optional{} : + armnn::Optional(constants[1]); + + std::vector weightsBacking; + ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking); + Optional optionalQBiases; + std::vector biasesBacking; + + if (biases.has_value()) + { + ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); + optionalQBiases = Optional(qBiases); + } + Convolution2dDescriptor convolution2dDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor, + qWeights, + optionalQBiases, + name); + break; + } + case armnn::LayerType::DepthToSpace : + { + DepthToSpaceDescriptor depthToSpaceDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddDepthToSpaceLayer(depthToSpaceDescriptor, name); + break; + } + case armnn::LayerType::DepthwiseConvolution2d : + { + DepthwiseConvolution2dDescriptor depthwiseConvolution2dDescriptor = + static_cast(descriptor); + + const armnn::Optional biases = constants.size() == 1 ? + armnn::Optional{} : + armnn::Optional(constants[1]); + + std::vector weightsBacking; + ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking); + Optional optionalQBiases; + std::vector biasesBacking; + + if (biases.has_value()) + { + ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); + optionalQBiases = Optional(qBiases); + } + + newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer( + depthwiseConvolution2dDescriptor, + qWeights, + optionalQBiases, + name); + break; + } + case armnn::LayerType::ElementwiseUnary : + { + ElementwiseUnaryDescriptor elementwiseUnaryDescriptor = + static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name); + break; + } + case armnn::LayerType::Fill : + { + FillDescriptor fillDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddFillLayer(fillDescriptor, name); + break; + } + case armnn::LayerType::FullyConnected : + { + FullyConnectedDescriptor fullyConnectedDescriptor = + static_cast(descriptor); + + const armnn::Optional biases = constants.size() == 1 ? 
+ armnn::Optional{} : + armnn::Optional(constants[1]); + + std::vector weightsBacking; + ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking); + Optional optionalQBiases; + std::vector biasesBacking; + + if (biases.has_value()) + { + ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); + optionalQBiases = Optional(qBiases); + } + + newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(fullyConnectedDescriptor, + qWeights, + optionalQBiases, + name); + break; + } + case armnn::LayerType::Input : + { + const DataType dataType = layer->GetOutputSlot(0).GetTensorInfo().GetDataType(); + IConnectableLayer* inputLayer = m_QuantizedNetwork->AddInputLayer(id, name); + + if (m_PreserveType && (dataType == DataType::Float32 || dataType == DataType::Float16)) + { + IConnectableLayer* quantizeLayer = m_QuantizedNetwork->AddQuantizeLayer(); + inputLayer->GetOutputSlot(0).Connect(quantizeLayer->GetInputSlot(0)); + inputLayer->GetOutputSlot(0).SetTensorInfo(layer->GetOutputSlot(0).GetTensorInfo()); + RecordLayer(layer, quantizeLayer); + return; + } + else + { + RecordLayer(layer, inputLayer); + return; + } + } + case armnn::LayerType::InstanceNormalization : + { + InstanceNormalizationDescriptor instanceNormalizationDescriptor = + static_cast(descriptor); + + newLayer = + m_QuantizedNetwork->AddInstanceNormalizationLayer(instanceNormalizationDescriptor, name); + break; + } + case armnn::LayerType::LogSoftmax : + { + LogSoftmaxDescriptor logSoftmaxDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddLogSoftmaxLayer(logSoftmaxDescriptor, name); + break; + } + case armnn::LayerType::Mean : + { + MeanDescriptor meanDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddMeanLayer(meanDescriptor, name); + break; + } + case armnn::LayerType::Multiplication : + { + newLayer = m_QuantizedNetwork->AddMultiplicationLayer(name); + break; + } + case armnn::LayerType::Normalization : + { + NormalizationDescriptor normalizationDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddNormalizationLayer(normalizationDescriptor, name); + break; + } + case armnn::LayerType::Output : + { + const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + const DataType& dataType = info.GetDataType(); + newLayer = m_QuantizedNetwork->AddOutputLayer(id, name); + + if (m_PreserveType && (dataType == DataType::Float32 || dataType == DataType::Float16)) + { + IConnectableLayer* dequantizeLayer = m_QuantizedNetwork->AddDequantizeLayer(); + RecordLayer(layer, dequantizeLayer); + SetQuantizedInputConnections(layer, dequantizeLayer); + dequantizeLayer->GetOutputSlot(0).Connect(newLayer->GetInputSlot(0)); + dequantizeLayer->GetOutputSlot(0).SetTensorInfo(info); + return; + } + else + { + break; + } + } + case armnn::LayerType::Pad : + { + PadDescriptor padDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddPadLayer(padDescriptor, name); + break; + } + case armnn::LayerType::Permute : + { + PermuteDescriptor permuteDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddPermuteLayer(permuteDescriptor, name); + break; + } + case armnn::LayerType::Pooling2d : + { + Pooling2dDescriptor pooling2dDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddPooling2dLayer(pooling2dDescriptor, name); + break; + } + case armnn::LayerType::Prelu : + { + newLayer = m_QuantizedNetwork->AddPreluLayer(name); + break; + } + case armnn::LayerType::Reshape : + { + 
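+            // Note: each case in this switch only re-creates the layer's descriptor and constants;
+            // the per-tensor quantization parameters are applied afterwards by RecordLayer() and
+            // SetQuantizedInputConnections() at the end of ExecuteStrategy() (Input and Output
+            // return early and handle this themselves).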
ReshapeDescriptor reshapeDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddReshapeLayer(reshapeDescriptor, name); + break; + } + case armnn::LayerType::Resize : + { + + ResizeBilinearDescriptor resizeBilinearDescriptor = + static_cast(descriptor); + + ResizeDescriptor resizeDescriptor; + resizeDescriptor.m_Method = ResizeMethod::Bilinear; + resizeDescriptor.m_TargetWidth = resizeBilinearDescriptor.m_TargetWidth; + resizeDescriptor.m_TargetHeight = resizeBilinearDescriptor.m_TargetHeight; + resizeDescriptor.m_DataLayout = resizeBilinearDescriptor.m_DataLayout; + + newLayer = m_QuantizedNetwork->AddResizeLayer(resizeDescriptor, name); + break; + } + case armnn::LayerType::Slice : + { + SliceDescriptor sliceDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddSliceLayer(sliceDescriptor, name); + break; + } + case armnn::LayerType::Softmax : + { + SoftmaxDescriptor softmaxDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddSoftmaxLayer(softmaxDescriptor, name); + break; + } + case armnn::LayerType::SpaceToBatchNd : + { + SpaceToBatchNdDescriptor spaceToBatchNdDescriptor = + static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name); + break; + } + case armnn::LayerType::SpaceToDepth : + { + SpaceToDepthDescriptor spaceToDepthDescriptor = static_cast(descriptor); + newLayer = m_QuantizedNetwork->AddSpaceToDepthLayer(spaceToDepthDescriptor, name); + break; + } + case armnn::LayerType::Splitter : + { + SplitterDescriptor splitterDescriptor = static_cast(descriptor); + newLayer = m_QuantizedNetwork->AddSplitterLayer(splitterDescriptor, name); + break; + } + case armnn::LayerType::Stack : + { + StackDescriptor stackDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddStackLayer(stackDescriptor, name); + break; + } + case armnn::LayerType::StridedSlice : + { + StridedSliceDescriptor stridedSliceDescriptor = static_cast(descriptor); + + newLayer = m_QuantizedNetwork->AddStridedSliceLayer(stridedSliceDescriptor, name); + break; + } + case armnn::LayerType::Subtraction : + { + newLayer = m_QuantizedNetwork->AddSubtractionLayer( name); + break; + } + case armnn::LayerType::TransposeConvolution2d : + { + + const armnn::Optional biases = constants.size() == 1 ? 
+                                 armnn::Optional<ConstTensor>{} :
+                                 armnn::Optional<ConstTensor>(constants[1]);
+
+            // quantize weights
+            std::vector<uint8_t> weightsBacking;
+            ConstTensor qWeights = CreateQuantizedConst(constants[0], weightsBacking);
+
+            // quantize biases
+            std::vector<int32_t> biasesBacking;
+            Optional<ConstTensor> optionalQBiases;
+            if (biases.has_value())
+            {
+                ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking);
+                optionalQBiases = Optional<ConstTensor>(qBiases);
+            }
+
+            TransposeConvolution2dDescriptor transposeConvolution2dDescriptor =
+                static_cast<const TransposeConvolution2dDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddTransposeConvolution2dLayer(transposeConvolution2dDescriptor,
+                                                                          qWeights,
+                                                                          optionalQBiases,
+                                                                          name);
+            break;
+        }
+        case armnn::LayerType::Transpose :
+        {
+            TransposeDescriptor transposeDescriptor = static_cast<const TransposeDescriptor&>(descriptor);
+
+            newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name);
+            break;
+        }
+        default:
+        {
+            throw UnimplementedException("Unimplemented layer encountered");
+        }
+    }
+    RecordLayer(layer, newLayer);
+    SetQuantizedInputConnections(layer, newLayer);
+}
+
+}
+
diff --git a/src/armnn/QuantizerStrategy.hpp b/src/armnn/QuantizerStrategy.hpp
new file mode 100644
index 0000000000..f782959020
--- /dev/null
+++ b/src/armnn/QuantizerStrategy.hpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Network.hpp"
+#include "NetworkQuantizerUtils.hpp"
+#include "StaticRangeStrategy.hpp"
+
+#include
+#include
+
+namespace armnn
+{
+class QuantizerStrategy : public IStrategy
+{
+public:
+    QuantizerStrategy(const RangeTracker& rangeTracker,
+                      const IQuantizationScheme* quantizationScheme,
+                      bool preserveType);
+
+    ~QuantizerStrategy() = default;
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override;
+
+    /// Extract the quantized network
+    INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); }
+
+private:
+    /// Connects the layer to preceding layers and sets the quantization parameters based on recorded ranges
+    void SetQuantizedInputConnections(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer);
+
+    /// Record the guids so we can easily find the layers later
+    void RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* qLayer);
+
+    /// Sets the bias quantization scale based on input and weight scales
+    ConstTensor CreateQuantizedBias(const IConnectableLayer* srcLayer,
+                                    const ConstTensor& weights,
+                                    const Optional<ConstTensor>& biases,
+                                    std::vector<int32_t>& weightsBacking);
+
+    /// Reference to the static range visitor used to retrieve the quantization ranges
+    const RangeTracker& m_Ranges;
+
+    /// Quantized version of the model we are building up
+    INetworkPtr m_QuantizedNetwork;
+
+    /// Mapping from input network guids to quantized network guids
+    std::unordered_map<LayerGuid, LayerGuid> m_OriginalToQuantizedGuidMap;
+
+    /// Mapping from guid to layer in quantized network
+    std::unordered_map<LayerGuid, IConnectableLayer*> m_QuantizedGuidToLayerMap;
+
+    const IQuantizationScheme* m_QuantizationScheme;
+
+    const bool m_PreserveType;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
deleted file mode 100644
index 0e9d22463f..0000000000
--- a/src/armnn/QuantizerVisitor.cpp
+++ /dev/null
@@ -1,589 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT -// - -#include "Network.hpp" -#include "NetworkQuantizerUtils.hpp" -#include "QuantizerVisitor.hpp" -#include "StaticRangeVisitor.hpp" - -#include -#include - -namespace armnn -{ - -QuantizerVisitor::QuantizerVisitor(const RangeTracker& rangeTracker, - const IQuantizationScheme* quantizationScheme, - bool preserveType) - : m_Ranges(rangeTracker) - , m_QuantizedNetwork(INetwork::Create()) - , m_QuantizationScheme(quantizationScheme) - , m_PreserveType(preserveType) -{ -} - -void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* srcLayer, - IConnectableLayer* quantizedLayer) -{ - ARMNN_ASSERT(srcLayer); - for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++) - { - const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i); - const InputSlot* inputSlot = PolymorphicDowncast(&srcInputSlot); - ARMNN_ASSERT(inputSlot); - const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot(); - - ARMNN_ASSERT(outputSlot); - unsigned int slotIdx = outputSlot->CalculateIndexOnOwner(); - Layer& layerToFind = outputSlot->GetOwningLayer(); - - auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid()); - if (found == m_OriginalToQuantizedGuidMap.end()) - { - // Error in graph traversal order - ARMNN_ASSERT_MSG(false, "Error in graph traversal"); - return; - } - - // Connect the slots in the quantized model - IConnectableLayer* prevQuantizedLayer = m_QuantizedGuidToLayerMap[found->second]; - IInputSlot& newInputSlot = quantizedLayer->GetInputSlot(i); - IOutputSlot& newOutputSlot = prevQuantizedLayer->GetOutputSlot(slotIdx); - newOutputSlot.Connect(newInputSlot); - TensorInfo info(outputSlot->GetTensorInfo()); - - // Only try to set quantization params on tensors that can be quantized - if (inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Boolean && - inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed32 && - inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed64) - { - // Fetch the min/max ranges that were computed earlier - auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx); - OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second); - info.SetDataType(m_QuantizationScheme->GetDataType()); - info.SetQuantizationOffset(qParams.second); - info.SetQuantizationScale(qParams.first); - } - newOutputSlot.SetTensorInfo(info); - } -} - -ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLayer, - const ConstTensor& weights, - const Optional& biases, - std::vector& backing) -{ - ARMNN_ASSERT(srcLayer); - const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0); - auto inputSlot = PolymorphicDowncast(&srcInputSlot); - ARMNN_ASSERT(inputSlot); - const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot(); - - ARMNN_ASSERT(outputSlot); - unsigned int slotIdx = outputSlot->CalculateIndexOnOwner(); - Layer& layerToFind = outputSlot->GetOwningLayer(); - - auto found = m_OriginalToQuantizedGuidMap.find(layerToFind.GetGuid()); - if (found == m_OriginalToQuantizedGuidMap.end()) - { - // Error in graph traversal order - ARMNN_ASSERT_MSG(false, "Error in graph traversal"); - return biases.value(); - } - - // Fetch the min/max ranges that were computed earlier - auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx); - OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second); - - // Get the quantization scale based on input and weight 
scale - float scale = qParams.first * weights.GetInfo().GetQuantizationScale(); - - // Set up quantized bias tensor info and allocate space - TensorInfo qInfo(biases.value().GetInfo().GetShape(), DataType::Signed32, scale, 0); - backing.resize(biases.value().GetInfo().GetNumElements()); - - // Convert values to int32 - for (size_t i = 0; i < backing.size(); ++i) - { - float fp32Value = static_cast(biases.value().GetMemoryArea())[i]; - backing[i] = armnn::numeric_cast(fp32Value * ( 1 / scale )); - } - - return ConstTensor(qInfo, backing); -} - -void QuantizerVisitor::RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer) -{ - m_OriginalToQuantizedGuidMap.insert(std::make_pair(srcLayer->GetGuid(), quantizedLayer->GetGuid())); - m_QuantizedGuidToLayerMap.insert(std::make_pair(quantizedLayer->GetGuid(), quantizedLayer)); -} - -void QuantizerVisitor::VisitAbsLayer(const IConnectableLayer* layer, const char* name) -{ - VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Abs), name); -} - -void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& activationDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddActivationLayer(activationDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddAdditionLayer(name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor& argMinMaxDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddArgMinMaxLayer(argMinMaxDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer, - const BatchNormalizationDescriptor& desc, - const ConstTensor& mean, - const ConstTensor& variance, - const ConstTensor& beta, - const ConstTensor& gamma, - const char* name) -{ - std::vector meanBacking; - ConstTensor qMean = CreateQuantizedConst(mean, meanBacking); - - std::vector varianceBacking; - ConstTensor qVariance = CreateQuantizedConst(variance, varianceBacking); - - std::vector betaBacking; - ConstTensor qBeta = CreateQuantizedConst(beta, betaBacking); - - std::vector gammaBacking; - ConstTensor qGamma = CreateQuantizedConst(gamma, gammaBacking); - - IConnectableLayer* newLayer = m_QuantizedNetwork->AddBatchNormalizationLayer(desc, - qMean, - qVariance, - qBeta, - qGamma, - name); - - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitComparisonLayer(const IConnectableLayer* layer, - const ComparisonDescriptor& comparisonDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddComparisonLayer(comparisonDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - 
-void QuantizerVisitor::VisitConcatLayer(const IConnectableLayer* layer, - const OriginsDescriptor& originsDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(originsDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitConstantLayer(const IConnectableLayer* layer, - const ConstTensor& input, - const char* name) -{ - std::vector inputBacking; - ConstTensor qInput = CreateQuantizedConst(input, inputBacking); - - IConnectableLayer* newLayer = m_QuantizedNetwork->AddConstantLayer(qInput, name); - RecordLayer(layer, newLayer); -} - -void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer, - const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - std::vector weightsBacking; - ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking); - Optional optionalQBiases; - std::vector biasesBacking; - - if (biases.has_value()) - { - ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); - optionalQBiases = Optional(qBiases); - } - - IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor, - qWeights, - optionalQBiases, - name); - - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitDepthToSpaceLayer(const IConnectableLayer* layer, - const DepthToSpaceDescriptor& descriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddDepthToSpaceLayer(descriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, - const DepthwiseConvolution2dDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - std::vector weightsBacking; - ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking); - Optional optionalQBiases; - std::vector biasesBacking; - - if (biases.has_value()) - { - ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); - optionalQBiases = Optional(qBiases); - } - - IConnectableLayer* newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc, - qWeights, - optionalQBiases, - name); - - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitElementwiseUnaryLayer(const IConnectableLayer* layer, - const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitFillLayer(const IConnectableLayer* layer, - const FillDescriptor& desc, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddFillLayer(desc, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer, - const FullyConnectedDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char *name) -{ - std::vector weightsBacking; - ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking); - Optional optionalQBiases; - std::vector biasesBacking; - - if (biases.has_value()) - { 
- ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); - optionalQBiases = Optional(qBiases); - } - - IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, - qWeights, - optionalQBiases, - name); - - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitInputLayer(const IConnectableLayer *layer, LayerBindingId id, const char *name) -{ - const DataType dataType = layer->GetOutputSlot(0).GetTensorInfo().GetDataType(); - IConnectableLayer* inputLayer = m_QuantizedNetwork->AddInputLayer(id, name); - - if (m_PreserveType && (dataType == DataType::Float32 || dataType == DataType::Float16)) - { - IConnectableLayer* quantizeLayer = m_QuantizedNetwork->AddQuantizeLayer(); - inputLayer->GetOutputSlot(0).Connect(quantizeLayer->GetInputSlot(0)); - inputLayer->GetOutputSlot(0).SetTensorInfo(layer->GetOutputSlot(0).GetTensorInfo()); - RecordLayer(layer, quantizeLayer); - } - else - { - RecordLayer(layer, inputLayer); - } -} - -void QuantizerVisitor::VisitInstanceNormalizationLayer(const IConnectableLayer* layer, - const InstanceNormalizationDescriptor& descriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddInstanceNormalizationLayer(descriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitLogSoftmaxLayer(const IConnectableLayer* layer, - const LogSoftmaxDescriptor& logSoftmaxDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddLogSoftmaxLayer(logSoftmaxDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitMeanLayer(const IConnectableLayer* layer, - const MeanDescriptor& meanDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddMeanLayer(meanDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddMultiplicationLayer(name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* layer, - const armnn::NormalizationDescriptor& normalizationDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddNormalizationLayer(normalizationDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name) -{ - const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); - const DataType& dataType = info.GetDataType(); - IConnectableLayer* outputLayer = m_QuantizedNetwork->AddOutputLayer(id, name); - - if (m_PreserveType && (dataType == DataType::Float32 || dataType == DataType::Float16)) - { - IConnectableLayer* dequantizeLayer = m_QuantizedNetwork->AddDequantizeLayer(); - RecordLayer(layer, dequantizeLayer); - SetQuantizedInputConnections(layer, dequantizeLayer); - dequantizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); - dequantizeLayer->GetOutputSlot(0).SetTensorInfo(info); - } - else - { - RecordLayer(layer, outputLayer); - SetQuantizedInputConnections(layer, outputLayer); - } -} - -void 
QuantizerVisitor::VisitPadLayer(const IConnectableLayer* layer, - const PadDescriptor& padDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddPadLayer(padDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitPermuteLayer(const IConnectableLayer* layer, - const PermuteDescriptor& permuteDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddPermuteLayer(permuteDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitPooling2dLayer(const IConnectableLayer* layer, - const Pooling2dDescriptor& pooling2dDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddPooling2dLayer(pooling2dDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitPreluLayer(const IConnectableLayer* layer, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddPreluLayer(name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddReshapeLayer(reshapeDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer, - const ResizeBilinearDescriptor& resizeBilinearDescriptor, - const char* name) -{ - ResizeDescriptor resizeDescriptor; - resizeDescriptor.m_Method = ResizeMethod::Bilinear; - resizeDescriptor.m_TargetWidth = resizeBilinearDescriptor.m_TargetWidth; - resizeDescriptor.m_TargetHeight = resizeBilinearDescriptor.m_TargetHeight; - resizeDescriptor.m_DataLayout = resizeBilinearDescriptor.m_DataLayout; - - VisitResizeLayer(layer, resizeDescriptor, name); -} - -void QuantizerVisitor::VisitResizeLayer(const IConnectableLayer* layer, - const ResizeDescriptor& resizeDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddResizeLayer(resizeDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer, const char* name) -{ - VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name); -} - -void QuantizerVisitor::VisitSliceLayer(const IConnectableLayer* layer, - const SliceDescriptor& sliceDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddSliceLayer(sliceDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer, - const SoftmaxDescriptor& softmaxDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddSoftmaxLayer(softmaxDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, - const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name); - RecordLayer(layer, newLayer); - 
SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitSpaceToDepthLayer(const IConnectableLayer* layer, - const SpaceToDepthDescriptor& spaceToDepthDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddSpaceToDepthLayer(spaceToDepthDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& splitterDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddSplitterLayer(splitterDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitStackLayer(const IConnectableLayer* layer, - const StackDescriptor& stackDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddStackLayer(stackDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& stridedSliceDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddStridedSliceLayer(stridedSliceDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitSubtractionLayer(const IConnectableLayer* layer, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddSubtractionLayer(name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitTransposeConvolution2dLayer(const IConnectableLayer* layer, - const TransposeConvolution2dDescriptor& descriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - // quantize weights - std::vector weightsBacking; - ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking); - - // quantize biases - std::vector biasesBacking; - Optional optionalQBiases; - if (biases.has_value()) - { - ConstTensor qBiases = CreateQuantizedBias(layer, qWeights, biases, biasesBacking); - optionalQBiases = Optional(qBiases); - } - - IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeConvolution2dLayer(descriptor, - qWeights, - optionalQBiases, - name); - - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -void QuantizerVisitor::VisitTransposeLayer(const IConnectableLayer* layer, - const TransposeDescriptor& transposeDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - -} //namespace armnn diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp deleted file mode 100644 index 65bd67101e..0000000000 --- a/src/armnn/QuantizerVisitor.hpp +++ /dev/null @@ -1,231 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "armnn/LayerVisitorBase.hpp" -#include "StaticRangeVisitor.hpp" -#include "NetworkQuantizationScheme.hpp" - -#include -#include -#include - -#include - -namespace armnn -{ - -// Forward declaration -class StaticRangeVisitor; - -/// Visitor object for quantizing layers in a network -class QuantizerVisitor : public LayerVisitorBase -{ -public: - QuantizerVisitor(const RangeTracker& rangeTracker, - const IQuantizationScheme* quantizationScheme, - bool preserveType = false); - - ~QuantizerVisitor() = default; - - /// Functions to quantize the individual layers, overridden from ILayerVisitor - ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") - void VisitAbsLayer(const IConnectableLayer* layer, const char* name = nullptr) override; - - void VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& activationDescriptor, - const char* name = nullptr) override; - - void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override; - - void VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor& argMinMaxDescriptor, - const char* name = nullptr) override; - - void VisitBatchNormalizationLayer(const IConnectableLayer* layer, - const BatchNormalizationDescriptor& desc, - const ConstTensor& mean, - const ConstTensor& variance, - const ConstTensor& beta, - const ConstTensor& gamma, - const char* name = nullptr) override; - - void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name = nullptr) override; - - void VisitComparisonLayer(const IConnectableLayer* layer, - const ComparisonDescriptor& comparisonDescriptor, - const char* name = nullptr) override; - - void VisitConcatLayer(const IConnectableLayer* layer, - const OriginsDescriptor& originsDescriptor, - const char* name = nullptr) override; - - void VisitConstantLayer(const IConnectableLayer* layer, - const ConstTensor& input, - const char* name = nullptr) override; - - void VisitConvolution2dLayer(const IConnectableLayer* layer, - const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitDepthToSpaceLayer(const IConnectableLayer* layer, - const DepthToSpaceDescriptor& depthToSpaceDescriptor, - const char* name = nullptr) override; - - void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, - const DepthwiseConvolution2dDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitElementwiseUnaryLayer(const IConnectableLayer* layer, - const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, - const char* name = nullptr) override; - - void VisitFillLayer(const IConnectableLayer* layer, - const FillDescriptor& desc, - const char* name) override; - - void VisitFullyConnectedLayer(const IConnectableLayer *layer, - const FullyConnectedDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char *name = nullptr) override; - - void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override; - - void VisitInstanceNormalizationLayer(const IConnectableLayer* layer, - const InstanceNormalizationDescriptor& instanceNormalizationDescriptor, - const char* name = nullptr) override; - - void VisitLogSoftmaxLayer(const IConnectableLayer* layer, - const 
LogSoftmaxDescriptor& logSoftmaxDescriptor, - const char* name = nullptr) override; - - void VisitMeanLayer(const IConnectableLayer* layer, - const MeanDescriptor& meanDescriptor, - const char* name = nullptr) override; - - void VisitMultiplicationLayer(const IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitNormalizationLayer(const IConnectableLayer* layer, - const NormalizationDescriptor& normalizationDescriptor, - const char* name = nullptr) override; - - void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override; - - void VisitPadLayer(const IConnectableLayer*, - const PadDescriptor&, - const char* name = nullptr) override; - - void VisitPermuteLayer(const IConnectableLayer* layer, - const PermuteDescriptor& permuteDescriptor, - const char* name = nullptr) override; - - void VisitPooling2dLayer(const IConnectableLayer* layer, - const Pooling2dDescriptor& pooling2dDescriptor, - const char* name = nullptr) override; - - void VisitPreluLayer(const IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name = nullptr) override; - - void VisitResizeLayer(const IConnectableLayer* layer, - const ResizeDescriptor& resizeDescriptor, - const char* name = nullptr) override; - - ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead") - void VisitResizeBilinearLayer(const IConnectableLayer* layer, - const ResizeBilinearDescriptor& resizeDesc, - const char* name = nullptr) override; - - ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") - void VisitRsqrtLayer(const IConnectableLayer*, - const char* name = nullptr) override; - - void VisitSliceLayer(const IConnectableLayer* layer, - const SliceDescriptor& sliceDescriptor, - const char* name = nullptr) override; - - void VisitSoftmaxLayer(const IConnectableLayer* layer, - const SoftmaxDescriptor& softmaxDescriptor, - const char* name = nullptr) override; - - void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, - const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, - const char* name = nullptr) override; - - void VisitSpaceToDepthLayer(const IConnectableLayer* layer, - const SpaceToDepthDescriptor& spaceToDepthDescriptor, - const char* name = nullptr) override; - - void VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& splitterDescriptor, - const char* name = nullptr) override; - - void VisitStackLayer(const IConnectableLayer* layer, - const StackDescriptor& stackDescriptor, - const char* name = nullptr) override; - - void VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& stridedSliceDescriptor, - const char* name = nullptr) override; - - void VisitSubtractionLayer(const IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitTransposeConvolution2dLayer(const IConnectableLayer* layer, - const TransposeConvolution2dDescriptor& descriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitTransposeLayer(const IConnectableLayer* layer, - const TransposeDescriptor& descriptor, - const char* name = nullptr) override; - - /// Extract the quantized network - INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); } - -private: - /// Connects the layer to preceeding layers and sets the quantization parameters based on recorded ranges - void 
SetQuantizedInputConnections(const IConnectableLayer* srcLayer, IConnectableLayer* quantizedLayer); - - /// Record the guids so we can easily find the layers later - void RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* qLayer); - - /// Sets the bias quantization scale based on input and weight scales - ConstTensor CreateQuantizedBias(const IConnectableLayer* srcLayer, - const ConstTensor& weights, - const Optional& biases, - std::vector& weightsBacking); - - /// Reference to the static range visitor used to retrieve the quantization ranges - const RangeTracker& m_Ranges; - - /// Quantized version of the model we are building up - INetworkPtr m_QuantizedNetwork; - - /// Mapping from input network guids to quantized network guids - std::unordered_map m_OriginalToQuantizedGuidMap; - - /// Mapping from guid to layer in quantized network - std::unordered_map m_QuantizedGuidToLayerMap; - - const IQuantizationScheme* m_QuantizationScheme; - - const bool m_PreserveType; -}; - -} //namespace armnn diff --git a/src/armnn/StaticRangeStrategy.cpp b/src/armnn/StaticRangeStrategy.cpp new file mode 100644 index 0000000000..84b8d24068 --- /dev/null +++ b/src/armnn/StaticRangeStrategy.cpp @@ -0,0 +1,193 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "StaticRangeStrategy.hpp" + +#include +#include +#include + +#include + +namespace armnn +{ + +StaticRangeStrategy::StaticRangeStrategy(RangeTracker& rangeTracker) + : m_RangeTracker(rangeTracker) +{} + +void StaticRangeStrategy::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max) +{ + m_RangeTracker.SetRange(layer, outputIdx, min, max); +} + +void StaticRangeStrategy::ForwardParentParameters(const IConnectableLayer* layer) +{ + const auto parentRange = m_RangeTracker.GetRange(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), 0); + SetRange(layer, 0, parentRange.first, parentRange.second); +} + + +void StaticRangeStrategy::ExecuteStrategy(const armnn::IConnectableLayer *layer, + const BaseDescriptor &descriptor, + const std::vector &constants, + const char *name, + const armnn::LayerBindingId id) +{ +IgnoreUnused(id, name); + +switch (layer->GetType()) +{ + case armnn::LayerType::Activation : + { + const ActivationDescriptor& activationDescriptor = static_cast(descriptor); + + switch (activationDescriptor.m_Function) + { + // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu + case ActivationFunction::Abs: + case ActivationFunction::Linear: + case ActivationFunction::ReLu: + case ActivationFunction::SoftReLu: + SetRange(layer, 0, 0.f, 15.f); + break; + case ActivationFunction::BoundedReLu: + SetRange(layer, 0, 0.f, activationDescriptor.m_A); + break; + case ActivationFunction::TanH: + SetRange(layer, 0, -1.f, 1.f); + break; + case ActivationFunction::LeakyReLu: + SetRange(layer, 0, -5.f, 15.f); + break; + default: + SetRange(layer, 0, -15.f, 15.f); + break; + } + break; + } + case armnn::LayerType::Addition : + { + SetRange(layer, 0, -20.f, 20.f); + break; + } + case armnn::LayerType::ArgMinMax : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::BatchToSpaceNd : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::BatchNormalization : + { + SetRange(layer, 0, -15.0f, 15.0f); + break; + } + case armnn::LayerType::Concat : + { + float min = std::numeric_limits::max(); + float max = std::numeric_limits::lowest(); + for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i) 
+ { + const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection(); + LayerGuid layerId = outputSlot->GetOwningLayerGuid(); + unsigned int slotIndex = outputSlot->CalculateIndexOnOwner(); + RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex); + min = std::min(min, range.first); + max = std::max(max, range.second); + } + SetRange(layer, 0, min, max); + break; + } + case armnn::LayerType::Constant : + { + + if (constants[0].GetDataType() != DataType::Float32) + { + throw InvalidArgumentException("Quantization is supported only for FP32 tensors"); + } + + // Work out the range based on the input constants + unsigned int inputNumElements = constants[0].GetNumElements(); + const float* inputData = reinterpret_cast(constants[0].GetMemoryArea()); + + float min = std::numeric_limits::max(); + float max = std::numeric_limits::lowest(); + + for (unsigned int i = 0; i < inputNumElements; i++) + { + const float inputValue = inputData[i]; + + min = std::min(min, inputValue); + max = std::max(max, inputValue); + } + SetRange(layer, 0, min, max); + break; + } + case armnn::LayerType::Convolution2d : + { + SetRange(layer, 0, -15.0f, 15.0f); + break; + } + case armnn::LayerType::DepthwiseConvolution2d : + { + SetRange(layer, 0, -15.0f, 15.0f); + break; + } + case armnn::LayerType::FullyConnected : + { + SetRange(layer, 0, -15.0f, 15.0f); + break; + } + case armnn::LayerType::Permute : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::Pooling2d : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::Reshape : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::Resize : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::Splitter : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::SpaceToBatchNd : + { + ForwardParentParameters(layer); + break; + } + case armnn::LayerType::Softmax : + { + SetRange(layer, 0, 0.f, 1.f); + break; + } + case armnn::LayerType::StridedSlice : + { + ForwardParentParameters(layer); + break; + } + default: + { + } +} +} + +} //namespace armnn diff --git a/src/armnn/StaticRangeStrategy.hpp b/src/armnn/StaticRangeStrategy.hpp new file mode 100644 index 0000000000..ed7cf274fe --- /dev/null +++ b/src/armnn/StaticRangeStrategy.hpp @@ -0,0 +1,41 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "armnn/LayerVisitorBase.hpp" +#include "RangeTracker.hpp" + +#include +#include + + +namespace armnn +{ + +class StaticRangeStrategy : public IStrategy +{ +public: + StaticRangeStrategy(RangeTracker& rangeTracker); + ~StaticRangeStrategy() = default; + + void ExecuteStrategy(const armnn::IConnectableLayer *layer, + const BaseDescriptor &descriptor, + const std::vector &constants, + const char *name, + const armnn::LayerBindingId id) override; + +private: + /// Set the range for an output slot on a layer + void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max); + + void ForwardParentParameters(const IConnectableLayer* layer); + + /// Mapping from a layer Guid to an array of ranges for outputs + RangeTracker& m_RangeTracker; + +}; + +} //namespace armnn diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp deleted file mode 100644 index 210c666739..0000000000 --- a/src/armnn/StaticRangeVisitor.cpp +++ /dev/null @@ -1,270 +0,0 @@ -// -// Copyright © 2017 Arm Ltd and Contributors. 
All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "StaticRangeVisitor.hpp" - -#include -#include -#include - -#include - -namespace armnn -{ - -StaticRangeVisitor::StaticRangeVisitor(RangeTracker& rangeTracker) - : m_RangeTracker(rangeTracker) -{} - -void StaticRangeVisitor::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max) -{ - m_RangeTracker.SetRange(layer, outputIdx, min, max); -} - -void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer) -{ - const auto parentRange = m_RangeTracker.GetRange(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), 0); - SetRange(layer, 0, parentRange.first, parentRange.second); -} - -void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name) -{ - IgnoreUnused(name); - SetRange(layer, 0, -20.f, 20.f); -} - -void StaticRangeVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer, - const BatchNormalizationDescriptor& desc, - const ConstTensor& mean, - const ConstTensor& variance, - const ConstTensor& beta, - const ConstTensor& gamma, - const char* name) -{ - IgnoreUnused(desc); - IgnoreUnused(mean); - IgnoreUnused(variance); - IgnoreUnused(beta); - IgnoreUnused(gamma); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); -} - -void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer, - const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - IgnoreUnused(convolution2dDescriptor); - IgnoreUnused(weights); - IgnoreUnused(biases); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); -} - -void StaticRangeVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, - const DepthwiseConvolution2dDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - IgnoreUnused(desc); - IgnoreUnused(weights); - IgnoreUnused(biases); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); -} - -void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& activationDescriptor, - const char* name) -{ - IgnoreUnused(name); - switch (activationDescriptor.m_Function) - { - // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu - case ActivationFunction::Abs: - case ActivationFunction::Linear: - case ActivationFunction::ReLu: - case ActivationFunction::SoftReLu: - SetRange(layer, 0, 0.f, 15.f); - break; - case ActivationFunction::BoundedReLu: - SetRange(layer, 0, 0.f, activationDescriptor.m_A); - break; - case ActivationFunction::TanH: - SetRange(layer, 0, -1.f, 1.f); - break; - case ActivationFunction::LeakyReLu: - SetRange(layer, 0, -5.f, 15.f); - break; - default: - SetRange(layer, 0, -15.f, 15.f); - break; - } -} - -void StaticRangeVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer, - const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor, - const char* name) -{ - IgnoreUnused(argMinMaxDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer* layer, - const FullyConnectedDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name) -{ - IgnoreUnused(desc); - IgnoreUnused(weights); - IgnoreUnused(biases); - IgnoreUnused(name); - SetRange(layer, 0, -15.0f, 15.0f); -} - -void StaticRangeVisitor::VisitPermuteLayer(const IConnectableLayer* layer, - const PermuteDescriptor& permuteDescriptor, - 
const char* name) -{ - IgnoreUnused(permuteDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, - const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, - const char* name) -{ - IgnoreUnused(spaceToBatchNdDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitPooling2dLayer(const IConnectableLayer* layer, - const Pooling2dDescriptor& pooling2dDescriptor, - const char* name) -{ - IgnoreUnused(pooling2dDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer, - const SoftmaxDescriptor& softmaxDescriptor, - const char* name) -{ - IgnoreUnused(softmaxDescriptor); - IgnoreUnused(name); - SetRange(layer, 0, 0.f, 1.f); -} - -void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer, - const OriginsDescriptor& originsDescriptor, - const char* name) -{ - IgnoreUnused(originsDescriptor); - IgnoreUnused(name); - float min = std::numeric_limits::max(); - float max = std::numeric_limits::lowest(); - for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i) - { - const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection(); - LayerGuid layerId = outputSlot->GetOwningLayerGuid(); - unsigned int slotIndex = outputSlot->CalculateIndexOnOwner(); - RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex); - min = std::min(min, range.first); - max = std::max(max, range.second); - } - SetRange(layer, 0, min, max); -} - -void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer, - const ConstTensor& input, - const char* name) -{ - IgnoreUnused(name); - - if (input.GetDataType() != DataType::Float32) - { - throw InvalidArgumentException("Quantization is supported only for FP32 tensors"); - } - - // Work out the range based on the input constants - unsigned int inputNumElements = input.GetNumElements(); - const float* inputData = reinterpret_cast(input.GetMemoryArea()); - - float min = std::numeric_limits::max(); - float max = std::numeric_limits::lowest(); - - for (unsigned int i = 0; i < inputNumElements; i++) - { - const float inputValue = inputData[i]; - - min = std::min(min, inputValue); - max = std::max(max, inputValue); - } - SetRange(layer, 0, min, max); -} - -void StaticRangeVisitor::VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name) -{ - IgnoreUnused(reshapeDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& splitterDescriptor, - const char* name) -{ - IgnoreUnused(splitterDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer, - const ResizeBilinearDescriptor& resizeDesc, - const char* name) -{ - IgnoreUnused(resizeDesc); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitResizeLayer(const IConnectableLayer* layer, - const ResizeDescriptor& resizeDescriptor, - const char* name) -{ - IgnoreUnused(resizeDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& stridedSliceDescriptor, - const char* name) -{ - 
IgnoreUnused(stridedSliceDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -void StaticRangeVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name) -{ - IgnoreUnused(batchToSpaceNdDescriptor); - IgnoreUnused(name); - ForwardParentParameters(layer); -} - -} //namespace armnn diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp deleted file mode 100644 index 20e3cb0292..0000000000 --- a/src/armnn/StaticRangeVisitor.hpp +++ /dev/null @@ -1,120 +0,0 @@ -// -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "armnn/LayerVisitorBase.hpp" -#include "RangeTracker.hpp" - -#include -#include - - -namespace armnn -{ - -/// Visitor class to establish min/max ranges based on the type of the layer -class StaticRangeVisitor : public LayerVisitorBase -{ -public: - StaticRangeVisitor(RangeTracker& rangeTracker); - ~StaticRangeVisitor() = default; - - /// Functions to set the Range on a per-layer-type basis - void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override; - - void VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor& desc, - const char* name = nullptr) override; - - void VisitBatchNormalizationLayer(const IConnectableLayer* layer, - const BatchNormalizationDescriptor& desc, - const ConstTensor& mean, - const ConstTensor& variance, - const ConstTensor& beta, - const ConstTensor& gamma, - const char* name = nullptr) override; - - void VisitConvolution2dLayer(const IConnectableLayer* layer, - const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, - const DepthwiseConvolution2dDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char* name = nullptr) override; - - void VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& activationDescriptor, - const char* name = nullptr) override; - - void VisitFullyConnectedLayer(const IConnectableLayer *layer, - const FullyConnectedDescriptor& desc, - const ConstTensor& weights, - const Optional& biases, - const char *name) override; - - void VisitPermuteLayer(const IConnectableLayer* layer, - const PermuteDescriptor& permuteDescriptor, - const char* name) override; - - void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, - const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, - const char* name = nullptr) override; - - void VisitPooling2dLayer(const IConnectableLayer* layer, - const Pooling2dDescriptor& pooling2dDescriptor, - const char* name) override; - - void VisitSoftmaxLayer(const IConnectableLayer* layer, - const SoftmaxDescriptor& softmaxDescriptor, - const char* name = nullptr) override; - - void VisitConcatLayer(const IConnectableLayer* layer, - const OriginsDescriptor& originsDescriptor, - const char* name = nullptr) override; - - void VisitConstantLayer(const IConnectableLayer* layer, - const ConstTensor& input, - const char* name = nullptr) override; - - void VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name = nullptr) override; - - void VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& splitterDescriptor, - const char* name = nullptr) 
override; - - void VisitResizeBilinearLayer(const IConnectableLayer* layer, - const ResizeBilinearDescriptor& resizeDesc, - const char* name = nullptr) override; - - void VisitResizeLayer(const IConnectableLayer* layer, - const ResizeDescriptor& resizeDescriptor, - const char* name = nullptr) override; - - void VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& stridedSliceDescriptor, - const char* name = nullptr) override; - - void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name = nullptr) override; - -private: - /// Set the range for an output slot on a layer - void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max); - - void ForwardParentParameters(const IConnectableLayer* layer); - - /// Mapping from a layer Guid to an array of ranges for outputs - RangeTracker& m_RangeTracker; -}; - -} //namespace armnn diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp index ce351a4376..6df5195a55 100644 --- a/src/armnn/layers/BatchNormalizationLayer.cpp +++ b/src/armnn/layers/BatchNormalizationLayer.cpp @@ -80,4 +80,14 @@ void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName()); } +void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_Mean->GetTensorInfo(), m_Mean->Map(true)}, + {m_Variance->GetTensorInfo(), m_Variance->Map(true)}, + {m_Beta->GetTensorInfo(), m_Beta->Map(true)}, + {m_Gamma->GetTensorInfo(), m_Gamma->Map(true)} }; + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp index 3915897a52..dab75d1e12 100644 --- a/src/armnn/layers/BatchNormalizationLayer.hpp +++ b/src/armnn/layers/BatchNormalizationLayer.hpp @@ -41,6 +41,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a BatchNormalizationLayer. /// @param [in] param BatchNormalizationDescriptor to configure the batch normalization operation. 
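The same shape repeats for every data-holding layer in this patch: pack the layer's constant tensors into a vector and forward layer, descriptor, constants and name through the single ExecuteStrategy entry point. For orientation, a minimal consumer of the unified interface could look like the sketch below; it assumes the five-argument ExecuteStrategy signature exercised by the reworked tests later in this patch, and the class name is illustrative, not part of the change.

    // Sketch of a minimal IStrategy implementation. Assumes the signature
    // used by the tests in this patch; LayerCountingStrategy is an
    // illustrative name only.
    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>
    #include <iostream>

    class LayerCountingStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id) override
        {
            armnn::IgnoreUnused(layer, descriptor, id);
            // One callback replaces the per-layer Visit*() methods of
            // ILayerVisitor; implementations dispatch on layer->GetType().
            std::cout << (name ? name : "<unnamed>")
                      << ": constants packed = " << constants.size() << std::endl;
            ++m_LayerCount;
        }

        unsigned int m_LayerCount = 0;
    };

A whole graph is walked with network->ExecuteStrategy(strategy), as the reworked QuantizerTest.cpp does further down.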
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp index 76b9997cfe..31e9e974cf 100644 --- a/src/armnn/layers/ConstantLayer.cpp +++ b/src/armnn/layers/ConstantLayer.cpp @@ -68,4 +68,10 @@ void ConstantLayer::Accept(ILayerVisitor& visitor) const visitor.VisitConstantLayer(this, layerOutputTensor, GetName()); } +void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_LayerOutput->GetTensorInfo(), m_LayerOutput->Map(true)} }; + strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp index 36fa1f96e9..9d91551df9 100644 --- a/src/armnn/layers/ConstantLayer.hpp +++ b/src/armnn/layers/ConstantLayer.hpp @@ -41,6 +41,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + std::unique_ptr m_LayerOutput; protected: diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp index 18557bf64e..0c3040ea6e 100644 --- a/src/armnn/layers/Convolution2dLayer.cpp +++ b/src/armnn/layers/Convolution2dLayer.cpp @@ -157,4 +157,16 @@ void Convolution2dLayer::Accept(ILayerVisitor& visitor) const visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName()); } +void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} }; + + if (GetParameters().m_BiasEnabled) + { + constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true))); + } + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp index 4dd1497fd8..440c80dfa9 100644 --- a/src/armnn/layers/Convolution2dLayer.hpp +++ b/src/armnn/layers/Convolution2dLayer.hpp @@ -44,6 +44,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + void SerializeLayerParameters(ParameterStringifyFunction& fn) const override; protected: diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp index ff9cebafd5..1871b7d15d 100644 --- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp +++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp @@ -165,4 +165,16 @@ void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName()); } +void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} }; + + if (GetParameters().m_BiasEnabled) + { + constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true))); + } + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp index dd0b0e6b88..7388cbcd8e 100644 --- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp +++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp @@ -43,6 +43,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + void 
SerializeLayerParameters(ParameterStringifyFunction& fn) const override; protected: diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp index d54bf26c40..356377a2f5 100644 --- a/src/armnn/layers/DetectionPostProcessLayer.cpp +++ b/src/armnn/layers/DetectionPostProcessLayer.cpp @@ -84,4 +84,11 @@ void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName()); } +void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_Anchors->GetTensorInfo(), m_Anchors->GetConstTensor()} }; + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp index 374eef5ec5..b0d58589b4 100644 --- a/src/armnn/layers/DetectionPostProcessLayer.hpp +++ b/src/armnn/layers/DetectionPostProcessLayer.hpp @@ -36,6 +36,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a DetectionPostProcessLayer. /// @param [in] param DetectionPostProcessDescriptor to configure the detection postprocess. diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp index 631e08c2ac..a169d31b2d 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.cpp +++ b/src/armnn/layers/ElementwiseBaseLayer.cpp @@ -82,4 +82,9 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs() ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType())); } +void ElementwiseBaseLayer::ExecuteStrategy(IStrategy& strategy) const +{ + strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp index 3893dcd9f9..17e8b446e0 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.hpp +++ b/src/armnn/layers/ElementwiseBaseLayer.hpp @@ -27,6 +27,8 @@ public: /// @return A vector to the inferred output shape. std::vector InferOutputShapes(const std::vector& inputShapes) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// @param numInputSlots The number of input slots for the layer. /// @param numOutputSlots The number of output slots for the layer. 
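Note the contrast with the parameter-free layers above: ElementwiseBaseLayer forwards an empty BaseDescriptor, while parameterised layers (via LayerWithParameters below) forward their concrete descriptor through the same BaseDescriptor& argument. A strategy therefore checks the layer type before downcasting. A minimal sketch, assuming the publicly visible armnn::LayerType enum used by the tests in this patch; the helper name is hypothetical:

    // Hypothetical helper: reading a typed field out of the BaseDescriptor&
    // handed to ExecuteStrategy. The downcast is only safe once the layer
    // type has been checked.
    #include <armnn/Descriptors.hpp>
    #include <armnn/Types.hpp>

    float GetActivationAlpha(armnn::LayerType type, const armnn::BaseDescriptor& descriptor)
    {
        if (type == armnn::LayerType::Activation)
        {
            return static_cast<const armnn::ActivationDescriptor&>(descriptor).m_A;
        }
        return 0.0f; // this layer type carries no ActivationDescriptor
    }

The reworked TestQuantization strategy in QuantizerTest.cpp below uses the same pattern.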
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp index a316b2b82a..102a6725a7 100644 --- a/src/armnn/layers/FakeQuantizationLayer.cpp +++ b/src/armnn/layers/FakeQuantizationLayer.cpp @@ -52,4 +52,10 @@ void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph"); } +void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const +{ + IgnoreUnused(strategy); + throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph"); +} + } // namespace armnn diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp index 09bd530f86..78e49e6474 100644 --- a/src/armnn/layers/FakeQuantizationLayer.hpp +++ b/src/armnn/layers/FakeQuantizationLayer.hpp @@ -30,6 +30,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a FakeQuantizationLayer. /// @param [in] param FakeQuantizationDescriptor to configure the fake quantization operation. diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp index ca7a0cc4bb..0e5e5942de 100644 --- a/src/armnn/layers/FullyConnectedLayer.cpp +++ b/src/armnn/layers/FullyConnectedLayer.cpp @@ -101,4 +101,16 @@ void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName()); } +void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} }; + + if (GetParameters().m_BiasEnabled) + { + constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true))); + } + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp index bbacd2551d..4a9cbe1136 100644 --- a/src/armnn/layers/FullyConnectedLayer.hpp +++ b/src/armnn/layers/FullyConnectedLayer.hpp @@ -43,6 +43,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a FullyConnectedLayer. /// @param [in] param FullyConnectedDescriptor to configure the fully connected operation. diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp index 3f3bdd8050..952eff66ff 100644 --- a/src/armnn/layers/LayerWithParameters.hpp +++ b/src/armnn/layers/LayerWithParameters.hpp @@ -48,6 +48,11 @@ protected: /// The parameters for the layer (not including tensor-valued weights etc.). 
Parameters m_Param; + + void ExecuteStrategy(IStrategy& strategy) const override + { + strategy.ExecuteStrategy(this, GetParameters(), {}, GetName()); + } }; } // namespace diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp index 8e396ab70c..ebc408a636 100644 --- a/src/armnn/layers/LstmLayer.cpp +++ b/src/armnn/layers/LstmLayer.cpp @@ -480,4 +480,150 @@ void LstmLayer::Accept(ILayerVisitor& visitor) const visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName()); } +void LstmLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors; + + LstmDescriptor descriptor = GetParameters(); + + // First add mandatory/basic parameters + if (m_BasicParameters.m_InputToForgetWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), + m_BasicParameters.m_InputToForgetWeights->Map(true))); + } + if (m_BasicParameters.m_InputToCellWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), + m_BasicParameters.m_InputToCellWeights->Map(true))); + } + if (m_BasicParameters.m_InputToOutputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), + m_BasicParameters.m_InputToOutputWeights->Map(true))); + } + if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), + m_BasicParameters.m_RecurrentToForgetWeights->Map(true))); + } + if (m_BasicParameters.m_RecurrentToCellWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), + m_BasicParameters.m_RecurrentToCellWeights->Map(true))); + } + if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), + m_BasicParameters.m_RecurrentToOutputWeights->Map(true))); + } + if (m_BasicParameters.m_ForgetGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), + m_BasicParameters.m_ForgetGateBias->Map(true))); + } + if (m_BasicParameters.m_CellBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_BasicParameters.m_CellBias->GetTensorInfo(), + m_BasicParameters.m_CellBias->Map(true))); + } + if (m_BasicParameters.m_OutputGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_BasicParameters.m_OutputGateBias->GetTensorInfo(), + m_BasicParameters.m_OutputGateBias->Map(true))); + } + + // Add cifg parameters + if (!descriptor.m_CifgEnabled) + { + if (m_CifgParameters.m_InputToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), + m_CifgParameters.m_InputToInputWeights->Map(true))); + } + if (m_CifgParameters.m_RecurrentToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), + m_CifgParameters.m_RecurrentToInputWeights->Map(true))); + } + if (m_CifgParameters.m_InputGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(), + m_CifgParameters.m_InputGateBias->Map(true))); + } + } + + // Add peephole parameters + if (descriptor.m_PeepholeEnabled) + { + if (!descriptor.m_CifgEnabled) + { + if (m_PeepholeParameters.m_CellToInputWeights != nullptr) + { + 
constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(), + m_PeepholeParameters.m_CellToInputWeights->Map(true))); + } + } + if (m_PeepholeParameters.m_CellToForgetWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), + m_PeepholeParameters.m_CellToForgetWeights->Map(true))); + } + if (m_PeepholeParameters.m_CellToOutputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), + m_PeepholeParameters.m_CellToOutputWeights->Map(true))); + } + } + + // Add projection parameters + if (descriptor.m_ProjectionEnabled) + { + if (m_ProjectionParameters.m_ProjectionWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), + m_ProjectionParameters.m_ProjectionWeights->Map(true))); + } + if (m_ProjectionParameters.m_ProjectionBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), + m_ProjectionParameters.m_ProjectionBias->Map(true))); + } + } + + // Add norm parameters + if (descriptor.m_LayerNormEnabled) + { + if (!descriptor.m_CifgEnabled) + { + if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_InputLayerNormWeights->Map(true))); + } + } + if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true))); + } + if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_CellLayerNormWeights->Map(true))); + } + if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_OutputLayerNormWeights->Map(true))); + } + } + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp index 51348d7015..30f952e276 100644 --- a/src/armnn/layers/LstmLayer.hpp +++ b/src/armnn/layers/LstmLayer.hpp @@ -107,6 +107,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a LstmLayer. /// @param [in] param LstmDescriptor to configure the lstm operation. 
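The constants vector built above is packed in declaration order, with whole groups gated by the descriptor flags, so a strategy that indexes into an LSTM layer's constants needs to know how many entries to expect. The helper below is illustrative, not part of the patch: it gives an upper bound, since members still set to nullptr are skipped at pack time.

    // Illustrative upper bound on the number of ConstTensors that
    // LstmLayer::ExecuteStrategy above packs for a given descriptor.
    // nullptr members are skipped, so the actual vector can be shorter
    // (e.g. when the optional projection bias is absent).
    #include <armnn/Descriptors.hpp>

    unsigned int MaxLstmConstants(const armnn::LstmDescriptor& desc)
    {
        unsigned int count = 9;                                  // basic weights and biases
        if (!desc.m_CifgEnabled)      { count += 3; }            // input gate weights and bias
        if (desc.m_PeepholeEnabled)   { count += desc.m_CifgEnabled ? 2u : 3u; }
        if (desc.m_ProjectionEnabled) { count += 2; }            // weights plus optional bias
        if (desc.m_LayerNormEnabled)  { count += desc.m_CifgEnabled ? 3u : 4u; }
        return count;
    }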
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index d9a802c23c..40c1b98012 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -55,4 +55,10 @@ void MemCopyLayer::Accept(ILayerVisitor& visitor) const
     throw armnn::Exception("MemCopyLayer should not appear in an input graph");
 }
 
+void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("MemCopyLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 996d6872d3..b913c529e5 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -30,6 +30,8 @@ public:
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a MemCopyLayer.
     /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3d1c702946..c96f92bc5e 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -55,4 +55,10 @@ void MemImportLayer::Accept(ILayerVisitor& visitor) const
     throw armnn::Exception("MemImportLayer should not appear in an input graph");
 }
 
+void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("MemImportLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 1cbdaac00b..47379701c7 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -30,6 +30,8 @@ public:
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 protected:
     /// Constructor to create a MemImportLayer.
    /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index dbbc1fd716..75c1e46a84 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -55,4 +55,10 @@ void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
     throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
 }
 
+void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    IgnoreUnused(strategy);
+    throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
+}
+
 } // namespace armnn
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index a4851c778f..2ed87578a4 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -35,6 +35,8 @@ public:
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
 private:
     PreCompiledLayer(const PreCompiledLayer& other) = delete;
     PreCompiledLayer& operator=(const PreCompiledLayer& other) = delete;
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 85f99bddf9..d957bbb485 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -503,4 +503,130 @@ void QLstmLayer::Accept(ILayerVisitor& visitor) const
     visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
 }
 
+
+void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    std::vector<ConstTensor> constTensors;
+
+    // First add mandatory/basic parameters
+    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToForgetWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_InputToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToCellWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
+                                              m_BasicParameters.m_InputToOutputWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToForgetWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToCellWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(
+                m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+                m_BasicParameters.m_RecurrentToOutputWeights->Map(true)));
+    }
+    if (m_BasicParameters.m_ForgetGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
+                                              m_BasicParameters.m_ForgetGateBias->Map(true)));
+    }
+    if (m_BasicParameters.m_CellBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_CellBias->GetTensorInfo(),
+                                              m_BasicParameters.m_CellBias->Map(true)));
+    }
+    if (m_BasicParameters.m_OutputGateBias != nullptr)
+    {
+        constTensors.emplace_back(ConstTensor(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
+                                              m_BasicParameters.m_OutputGateBias->Map(true)));
+    }
+
+    // Add cifg parameters
+
if (m_CifgParameters.m_InputToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), + m_CifgParameters.m_InputToInputWeights->Map(true))); + } + if (m_CifgParameters.m_RecurrentToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), + m_CifgParameters.m_RecurrentToInputWeights->Map(true))); + } + if (m_CifgParameters.m_InputGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(), + m_CifgParameters.m_InputGateBias->Map(true))); + } + + // Add peephole parameters + if (m_PeepholeParameters.m_CellToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(), + m_PeepholeParameters.m_CellToInputWeights->Map(true))); + } + if (m_PeepholeParameters.m_CellToForgetWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), + m_PeepholeParameters.m_CellToForgetWeights->Map(true))); + } + if (m_PeepholeParameters.m_CellToOutputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), + m_PeepholeParameters.m_CellToOutputWeights->Map(true))); + } + + // Add projection parameters + if (m_ProjectionParameters.m_ProjectionWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), + m_ProjectionParameters.m_ProjectionWeights->Map(true))); + } + if (m_ProjectionParameters.m_ProjectionBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), + m_ProjectionParameters.m_ProjectionBias->Map(true))); + } + + // Add norm parameters + if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_InputLayerNormWeights->Map(true))); + } + if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true))); + } + if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_CellLayerNormWeights->Map(true))); + } + if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), + m_LayerNormParameters.m_OutputLayerNormWeights->Map(true))); + } + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp index 5757ef6559..70cc4f2b15 100644 --- a/src/armnn/layers/QLstmLayer.hpp +++ b/src/armnn/layers/QLstmLayer.hpp @@ -109,6 +109,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a QLstmLayer. /// @param [in] name Optional name for the layer. 
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp index 624e443064..578d9eb137 100644 --- a/src/armnn/layers/QuantizedLstmLayer.cpp +++ b/src/armnn/layers/QuantizedLstmLayer.cpp @@ -291,4 +291,91 @@ void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const visitor.VisitQuantizedLstmLayer(this, inputParams, GetName()); } +void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors; + + // InputToX weight tensors + if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_InputToInputWeights->Map(true))); + } + + if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_InputToForgetWeights->Map(true))); + } + + if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_InputToCellWeights->Map(true))); + } + + if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_InputToOutputWeights->Map(true))); + } + + // RecurrentToX weight tensors + if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_RecurrentToInputWeights->Map(true))); + } + + if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Map(true))); + } + + if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_RecurrentToCellWeights->Map(true))); + } + + if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr) + { + constTensors.emplace_back(ConstTensor( + m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(), + m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Map(true))); + } + + // Bias tensors + if (m_QuantizedLstmParameters.m_InputGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(), + m_QuantizedLstmParameters.m_InputGateBias->Map(true))); + } + + if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(), + m_QuantizedLstmParameters.m_ForgetGateBias->Map(true))); + } + + if (m_QuantizedLstmParameters.m_CellBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(), + m_QuantizedLstmParameters.m_CellBias->Map(true))); + } + + if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr) + { + constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(), + m_QuantizedLstmParameters.m_OutputGateBias->Map(true))); + } + + + strategy.ExecuteStrategy(this, BaseDescriptor(), 
constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp index bfe86a4629..544acbd816 100644 --- a/src/armnn/layers/QuantizedLstmLayer.hpp +++ b/src/armnn/layers/QuantizedLstmLayer.hpp @@ -71,6 +71,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a QuantizedLstmLayer. /// @param [in] name Optional name for the layer. diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp index 2b0dffe370..3b14ef0d93 100644 --- a/src/armnn/layers/RankLayer.cpp +++ b/src/armnn/layers/RankLayer.cpp @@ -46,4 +46,9 @@ void RankLayer::Accept(ILayerVisitor& visitor) const visitor.VisitRankLayer(this, GetName()); } +void RankLayer::ExecuteStrategy(IStrategy& strategy) const +{ + strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName()); +} + } //namespace armnn \ No newline at end of file diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp index f4f1ec9e66..fbd2824bb5 100644 --- a/src/armnn/layers/RankLayer.hpp +++ b/src/armnn/layers/RankLayer.hpp @@ -24,7 +24,9 @@ class RankLayer : public Layer void Accept(ILayerVisitor& visitor) const override; - protected: + void ExecuteStrategy(IStrategy& strategy) const override; + +protected: RankLayer(const char* name); ~RankLayer() = default; }; diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp index 189e5f6168..bd8cb096e2 100644 --- a/src/armnn/layers/TransposeConvolution2dLayer.cpp +++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp @@ -135,4 +135,16 @@ void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName()); } +void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const +{ + std::vector constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} }; + + if (GetParameters().m_BiasEnabled) + { + constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true))); + } + + strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName()); +} + } // namespace armnn diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp index 1ee984d231..903c957393 100644 --- a/src/armnn/layers/TransposeConvolution2dLayer.hpp +++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp @@ -42,6 +42,8 @@ public: void Accept(ILayerVisitor& visitor) const override; + void ExecuteStrategy(IStrategy& strategy) const override; + protected: /// Constructor to create a TransposeConvolution2dLayer. /// @param [in] param TransposeConvolution2dDescriptor to configure the 2D transpose convolution operation. 
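All four weighted layer types touched by this patch (Convolution2d, DepthwiseConvolution2d, FullyConnected and TransposeConvolution2d) share one packing convention: constants[0] is always the weights, and constants[1] exists only when m_BiasEnabled is set. A consumer-side sketch with a hypothetical helper name:

    // Sketch: unpacking the weights/optional-bias convention used by the
    // weighted layers above. UnpackWeightsAndBias is illustrative only.
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <vector>

    void UnpackWeightsAndBias(const std::vector<armnn::ConstTensor>& constants)
    {
        const armnn::ConstTensor& weights = constants[0];        // always present
        const bool hasBias = (constants.size() == 2);
        armnn::Optional<armnn::ConstTensor> biases = hasBias
            ? armnn::Optional<armnn::ConstTensor>(constants[1])
            : armnn::Optional<armnn::ConstTensor>(armnn::EmptyOptional());
        // ... hand weights/biases to a serializer or quantizer here ...
        (void) weights;
        (void) biases;
    }

The reworked QuantizerTest.cpp below relies on exactly this convention when it calls TestQuantizationOnLayersWithBiases with either constants[1] or an EmptyOptional().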
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index da85029373..67d0f95292 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -7,10 +7,8 @@ #include "../Network.hpp" #include "../NetworkQuantizerUtils.hpp" #include "../OverrideInputRangeVisitor.hpp" -#include "../RangeTracker.hpp" #include -#include #include #include #include @@ -37,45 +35,332 @@ const float g_TestTolerance = 0.000001f; BOOST_AUTO_TEST_SUITE(Quantizer) -class TestQuantization : public LayerVisitorBase +class TestQuantization : public IStrategy { public: - TestQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : LayerVisitorBase() - , m_InputShape(inputShape) - , m_OutputShape(outputShape) - , m_QuantizerOptions(QuantizerOptions()) {} + TestQuantization(const TensorShape &inputShape, const TensorShape &outputShape) + : m_InputShape(inputShape), m_OutputShape(outputShape), m_QuantizerOptions(QuantizerOptions()) + {} TestQuantization(const QuantizerOptions& options, const TensorShape& inputShape, const TensorShape& outputShape) - : LayerVisitorBase() - , m_InputShape(inputShape) + : m_InputShape(inputShape) , m_OutputShape(outputShape) , m_QuantizerOptions(options) {} - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override + void ExecuteStrategy(const armnn::IConnectableLayer *layer, + const BaseDescriptor &descriptor, + const std::vector &constants, + const char *name, + const armnn::LayerBindingId id) override { IgnoreUnused(id, name); + + if (layer->GetType() == armnn::LayerType::Output) + { + const TensorInfo &info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + BOOST_TEST(m_OutputShape == info.GetShape()); + return; + } + const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); - BOOST_TEST(m_InputShape == info.GetShape()); - // Based off current default [-15.0f, 15.0f] - TestQuantizationParams(info, {30.0f / g_AsymmU8QuantizationBase, 128}, - {30.0f / g_AsymmS8QuantizationBase, 0}, - {15.0f / g_SymmS8QuantizationBase , 0}, - {15.0f / g_SymmS16QuantizationBase, 0}); + + switch (layer->GetType()) + { + case armnn::LayerType::BatchToSpaceNd : + case armnn::LayerType::Permute : + case armnn::LayerType::Pooling2d : + case armnn::LayerType::Reshape : + case armnn::LayerType::Resize : + case armnn::LayerType::SpaceToBatchNd : + case armnn::LayerType::Splitter : + case armnn::LayerType::StridedSlice : + { + CheckDefaultQuantizationSettings(info); + break; + } + case armnn::LayerType::Addition : + { + + // Based off default static range [-20.0f, 20.0f] + TestQuantizationParams(info, {40.0f / g_AsymmU8QuantizationBase, 128}, + {40.0f / g_AsymmS8QuantizationBase, 0}, + {20.0f / g_SymmS8QuantizationBase, 0}, + {20.0f / g_SymmS16QuantizationBase, 0}); + break; + } + case armnn::LayerType::Activation : + { + const ActivationDescriptor& activationDescriptor = static_cast(descriptor); + + switch (activationDescriptor.m_Function) + { + case ActivationFunction::BoundedReLu : + { + // Based off default static range [0.0f, 3.5f] + TestQuantizationParams(info, {3.5f / g_AsymmU8QuantizationBase, 0}, + {3.5f / g_AsymmS8QuantizationBase, -128}, + {3.5f / g_SymmS8QuantizationBase, 0}, + {3.5f / g_SymmS16QuantizationBase, 0}); + break; + } + case ActivationFunction::Elu : + { + TestQuantizationParams( + info, {30.0f / g_AsymmU8QuantizationBase, 128}, + {30.0f / g_AsymmS8QuantizationBase, 0}, + {15.0f / g_SymmS8QuantizationBase, 0}, + {15.0f / g_SymmS16QuantizationBase, 
0});
+                    break;
+                }
+                case ActivationFunction::HardSwish :
+                {
+                    TestQuantizationParams(info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                                           {30.0f / g_AsymmS8QuantizationBase, 0},
+                                           {15.0f / g_SymmS8QuantizationBase, 0},
+                                           {15.0f / g_SymmS16QuantizationBase, 0});
+                    break;
+                }
+                case ActivationFunction::LeakyReLu :
+                {
+                    // Based off default static range [-5.0f, 15.0f]
+                    TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
+                                           {20.0f / g_AsymmS8QuantizationBase, -64},
+                                           {15.0f / g_SymmS8QuantizationBase, 0},
+                                           {15.0f / g_SymmS16QuantizationBase, 0});
+                    break;
+                }
+                case ActivationFunction::TanH :
+                {
+                    TestQuantizationParams(info, {2.0f / g_AsymmU8QuantizationBase, 128},
+                                           {2.0f / g_AsymmS8QuantizationBase, 0},
+                                           {1.0f / g_SymmS8QuantizationBase, 0},
+                                           {1.0f / g_SymmS16QuantizationBase, 0});
+                    break;
+                }
+                default:
+                {
+                    // Based off default static range [0.0f, 15.0f]
+                    TestQuantizationParams(info, {15.0f / g_AsymmU8QuantizationBase, 0},
+                                           {15.0f / g_AsymmS8QuantizationBase, -128},
+                                           {15.0f / g_SymmS8QuantizationBase, 0},
+                                           {15.0f / g_SymmS16QuantizationBase, 0});
+                    break;
+                }
+            }
+            break;
+        }
+        case armnn::LayerType::ArgMinMax :
+        {
+            const ArgMinMaxDescriptor& argMinMaxDescriptor = static_cast<const ArgMinMaxDescriptor&>(descriptor);
+
+            if (argMinMaxDescriptor.m_Function == ArgMinMaxFunction::Max)
+            {
+                break;
+            }
+            TestQuantizationParams(info,
+                                   { 30.0f / g_AsymmU8QuantizationBase, 128 },
+                                   { 30.0f / g_AsymmS8QuantizationBase, 0 },
+                                   { 15.0f / g_SymmS8QuantizationBase, 0 },
+                                   { 15.0f / g_SymmS16QuantizationBase, 0 });
+            break;
+        }
+        case armnn::LayerType::BatchNormalization :
+        {
+            // Based off default static range [-15.0f, 15.0f]
+            TestQuantizationParams(
+                info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                {30.0f / g_AsymmS8QuantizationBase, 0},
+                {15.0f / g_SymmS8QuantizationBase, 0},
+                {15.0f / g_SymmS16QuantizationBase, 0});
+
+            // Test constants
+            TestConstantQuantizationParams(constants[0].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+            TestConstantQuantizationParams(constants[1].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+            TestConstantQuantizationParams(constants[2].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+            TestConstantQuantizationParams(constants[3].GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
+            break;
+        }
+        case armnn::LayerType::Comparison :
+        {
+            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
+            const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 };
+            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0 };
+            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
+
+            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
+            break;
+        }
+        case armnn::LayerType::Constant :
+        {
+            // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
+            TestQuantizationParams(info, {8.0f / g_AsymmU8QuantizationBase, 64},
+                                   {8.0f / g_AsymmS8QuantizationBase, -64},
+                                   {6.0f / g_SymmS8QuantizationBase, 0},
+                                   {6.0f / g_SymmS16QuantizationBase, 0});
+            break;
+        }
+        case armnn::LayerType::Convolution2d :
+        {
+            if (constants.size() == 1)
+            {
+                TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional());
+            }
+            else if (constants.size() == 2)
+            {
+                TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]);
+            }
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d :
+        {
+            if (constants.size() == 2)
+            {
+                TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]);
+            }
+            else
if (constants.size() == 1) + { + TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional()); + } + break; + } + case armnn::LayerType::DepthToSpace : + { + const OffsetScalePair qAsymmU8Params{30.0f / g_AsymmU8QuantizationBase, 128}; + const OffsetScalePair qAsymmS8Params{30.0f / g_AsymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS8Params{15.0f / g_SymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS16Params{15.0f / g_SymmS16QuantizationBase, 0}; + + TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); + break; + } + case armnn::LayerType::FullyConnected : + { + if (constants.size() == 2) + { + TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]); + } + else if (constants.size() == 1) + { + TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional()); + } + + break; + } + case armnn::LayerType::Fill : + { + const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 }; + const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 }; + + TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); + break; + } + case armnn::LayerType::Input : + { + BOOST_TEST(m_InputShape == info.GetShape()); + // Based off current default [-15.0f, 15.0f] + TestQuantizationParams(info, {30.0f / g_AsymmU8QuantizationBase, 128}, + {30.0f / g_AsymmS8QuantizationBase, 0}, + {15.0f / g_SymmS8QuantizationBase, 0}, + {15.0f / g_SymmS16QuantizationBase, 0}); + break; + } + case armnn::LayerType::InstanceNormalization : + { + const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 }; + const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 }; + + TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); + break; + } + case armnn::LayerType::LogSoftmax : + { + const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 }; + const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0}; + const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 }; + + TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); + break; + } + case armnn::LayerType::Slice : + { + const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 }; + const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 }; + const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0 }; + const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 }; + + TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); + break; + } + case armnn::LayerType::Softmax : + { + // Based off default static range [0.0f, 1.0f] + TestQuantizationParams(info, {1.0f / g_AsymmU8QuantizationBase, 0}, + {1.0f / g_AsymmS8QuantizationBase, -128}, + {1.0f / g_SymmS8QuantizationBase, 0}, + {1.0f / g_SymmS16QuantizationBase, 0}); + break; + } + case armnn::LayerType::SpaceToDepth : + { + TestQuantizationParams(info, + { 30.0f / g_AsymmU8QuantizationBase, 128 }, + { 30.0f / 
g_AsymmS8QuantizationBase, 0 }, + { 15.0f / g_SymmS8QuantizationBase, 0 }, + { 15.0f / g_SymmS16QuantizationBase, 0 }); + + break; + } + case armnn::LayerType::Stack : + { + TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); + + TestQuantizationParams(outputInfo, + { 30.0f / g_AsymmU8QuantizationBase, 128 }, + { 30.0f / g_AsymmS8QuantizationBase, 0}, + { 15.0f / g_SymmS8QuantizationBase, 0}, + { 15.0f / g_SymmS16QuantizationBase, 0 }); + break; + } + case armnn::LayerType::TransposeConvolution2d : + { + if (constants.size() == 2) + { + TestQuantizationOnLayersWithBiases(layer, constants[0], constants[1]); + } + else if (constants.size() == 1) + { + TestQuantizationOnLayersWithBiases(layer, constants[0], armnn::EmptyOptional()); + } + break; + } + default: + { + throw UnimplementedException("Unimplemented layer encountered"); + } + } } - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override + +protected: + + void CheckDefaultQuantizationSettings(const TensorInfo& info) { - IgnoreUnused(id, name); - const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); - BOOST_TEST(m_OutputShape == info.GetShape()); + TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64}, + {20.0f / g_AsymmS8QuantizationBase,-64}, + {15.0f / g_SymmS8QuantizationBase, 0}, + {15.0f / g_SymmS16QuantizationBase, 0}); } -protected: void TestQuantizationParams(const TensorInfo& info, const OffsetScalePair& qAsymmU8Params, const OffsetScalePair& qAsymmS8Params, @@ -188,39 +473,41 @@ private: QuantizerOptions m_QuantizerOptions; }; -void VisitLayersTopologically(const INetwork* inputNetwork, ILayerVisitor& visitor) +void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy) { auto network = PolymorphicDowncast(inputNetwork); auto graph = network->GetGraph().TopologicalSort(); - VisitLayers(graph, visitor); + ApplyStrategyToLayers(graph, strategy); } -class TestAdditionQuantization : public TestQuantization +void TestNetwork(INetwork* network, const TensorShape inShape, const TensorShape outShape) { -public: - TestAdditionQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(inputShape, outputShape) {} + const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8); + INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network, qAsymmU8Options)->ExportNetwork(); + TestQuantization validatorQAsymmU8(inShape, outShape); + VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - TestAdditionQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestQuantization(options, inputShape, outputShape) {} + const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); + INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network, qAsymmS8Options)->ExportNetwork(); + TestQuantization validatorQAsymmS8(qAsymmS8Options, inShape, outShape); + VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - void VisitAdditionLayer(const IConnectableLayer* layer, - const char* name = nullptr) override - { - IgnoreUnused(name); - TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); + const QuantizerOptions qSymmS8Options(DataType::QSymmS8); + INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network, qSymmS8Options)->ExportNetwork(); + TestQuantization validatorQSymmS8(qSymmS8Options, inShape, outShape); + 
VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - // Based off default static range [-20.0f, 20.0f] - TestQuantizationParams(info, {40.0f / g_AsymmU8QuantizationBase, 128}, - {40.0f / g_AsymmS8QuantizationBase, 0}, - {20.0f / g_SymmS8QuantizationBase, 0}, - {20.0f / g_SymmS16QuantizationBase, 0}); - } -}; + const QuantizerOptions qSymmS16options(DataType::QSymmS16); + INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network, qSymmS16options)->ExportNetwork(); + TestQuantization validatorQSymmS16(qSymmS16options, inShape, outShape); + VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); +} +void TestNetwork(INetwork* network, const TensorShape shape) +{ + TestNetwork(network, shape, shape); +} BOOST_AUTO_TEST_CASE(QuantizeAddition) { @@ -244,54 +531,9 @@ BOOST_AUTO_TEST_CASE(QuantizeAddition) input1->GetOutputSlot(0).SetTensorInfo(info); addition->GetOutputSlot(0).SetTensorInfo(info); - const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork(); - TestAdditionQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestAdditionQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestAdditionQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestAdditionQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } -class TestActivationQuantization : public TestQuantization -{ -public: - TestActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(inputShape, outputShape) {} - - TestActivationQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestQuantization(options, inputShape, outputShape) {} - - void VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& descriptor, - const char* name = nullptr) override - { - IgnoreUnused(descriptor, name); - - TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); - - // Based off default static range [0.0f, 15.0f] - TestQuantizationParams(info, {15.0f / g_AsymmU8QuantizationBase, 0}, - {15.0f / g_AsymmS8QuantizationBase, -128}, - {15.0f / g_SymmS8QuantizationBase, 0}, - {15.0f / g_SymmS16QuantizationBase, 0}); - } -}; - INetworkPtr CreateNetworkWithActivationLayer(const ActivationDescriptor& descriptor, const TensorShape& shape) { INetworkPtr network = INetwork::Create(); @@ -313,28 +555,6 @@ INetworkPtr CreateNetworkWithActivationLayer(const ActivationDescriptor& descrip return network; } -class TestArgMinMaxQuantization : public 
TestQuantization -{ -public: - TestArgMinMaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(inputShape, outputShape) {} - - TestArgMinMaxQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestQuantization(options, inputShape, outputShape) {} - - void VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor&, - const char* name = nullptr) override - { - IgnoreUnused(name); - TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); - - BOOST_CHECK(info.GetDataType() == DataType::Signed32); - } -}; - INetworkPtr CreateNetworkWithArgMinMaxLayer(const ArgMinMaxDescriptor& descriptor, const TensorShape& shape) { INetworkPtr network = INetwork::Create(); @@ -417,34 +637,47 @@ BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant) std::unique_ptr quantizationScheme = std::make_unique(); OffsetScalePair qParams = quantizationScheme->ComputeScheme(-77.0, 98.0); - class TestOutputLayerVisitor : public LayerVisitorBase - { - public: - TestOutputLayerVisitor(const OffsetScalePair& offsetScalePair, const DataType& dataType) : +class TestOutputStrategy : public IStrategy +{ + public : + TestOutputStrategy(const OffsetScalePair& offsetScalePair, const DataType& dataType) : m_OffsetScalePair(offsetScalePair), m_DataType(dataType) {} - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id) override + { + IgnoreUnused(name, constants, id, descriptor); + + switch (layer->GetType()) { - IgnoreUnused(id, name); - const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); - BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType, - std::string(armnn::GetDataTypeName(info.GetDataType())) - .append(" == ").append(armnn::GetDataTypeName(m_DataType))); - // int_32t - BOOST_CHECK(info.GetQuantizationOffset() == m_OffsetScalePair.second); - // float - BOOST_TEST(info.GetQuantizationScale() == m_OffsetScalePair.first, boost::test_tools::tolerance(0.001)); + case armnn::LayerType::Output : + { + const TensorInfo &info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType, + std::string(armnn::GetDataTypeName(info.GetDataType())) + .append(" == ").append(armnn::GetDataTypeName(m_DataType))); + // int_32t + BOOST_CHECK(info.GetQuantizationOffset() == m_OffsetScalePair.second); + // float + BOOST_TEST(info.GetQuantizationScale() == m_OffsetScalePair.first, + boost::test_tools::tolerance(0.001)); + break; + } + default: + {} } + } - private: - const OffsetScalePair m_OffsetScalePair; - const DataType m_DataType; - }; +private: + const OffsetScalePair m_OffsetScalePair; + const DataType m_DataType; +}; - TestOutputLayerVisitor visitor(qParams, quantizationScheme->GetDataType()); - quantizedNetwork->Accept(visitor); + TestOutputStrategy strategy(qParams, quantizationScheme->GetDataType()); + quantizedNetwork->ExecuteStrategy(strategy); } BOOST_AUTO_TEST_CASE(QuantizeAbsActivation) @@ -457,25 +690,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbsActivation) const TensorShape shape{1U}; INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape); - const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8); - INetworkPtr quantizedNetworkQAsymmU8 = 
 
 BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
 {
@@ -457,25 +690,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeArgMax)
 {
@@ -486,25 +701,7 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMax)
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithArgMinMaxLayer(descriptor, shape);
 
-    const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get(), qAsymmU8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestArgMinMaxQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
 {
@@ -517,236 +714,61 @@ BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
-}
-
-BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
-{
-    ActivationDescriptor descriptor;
-    descriptor.m_Function = ActivationFunction::ReLu;
-    descriptor.m_A = 3.5f;
-    descriptor.m_B = -10.0f;
-
-    const TensorShape shape{1U};
-    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
-
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
-}
-
-BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
-{
-    ActivationDescriptor descriptor;
-    descriptor.m_Function = ActivationFunction::SoftReLu;
-    descriptor.m_A = 3.5f;
-    descriptor.m_B = -10.0f;
-
-    const TensorShape shape{1U};
-    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
-
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
-}
-
-BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
-{
-    class TestBoundedReluActivationQuantization : public TestQuantization
-    {
-    public:
-        TestBoundedReluActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestBoundedReluActivationQuantization(const QuantizerOptions& options,
-                                              const TensorShape& inputShape,
-                                              const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [0.0f, 3.5f]
-            TestQuantizationParams(info, {3.5f / g_AsymmU8QuantizationBase, 0},
-                                         {3.5f / g_AsymmS8QuantizationBase, -128},
-                                         {3.5f / g_SymmS8QuantizationBase, 0},
-                                         {3.5f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
-    ActivationDescriptor descriptor;
-    descriptor.m_Function = ActivationFunction::BoundedReLu;
-    descriptor.m_A = 3.5f;
-    descriptor.m_B = -10.0f;
-
-    const TensorShape shape{1U};
-    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
-
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestBoundedReluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
-BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
-{
-    class TestTanHActivationQuantization : public TestQuantization
-    {
-    public:
-        TestTanHActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestTanHActivationQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-1.0f, 1.0f]
-            TestQuantizationParams(
-                info, {2.0f / g_AsymmU8QuantizationBase, 128},
-                      {2.0f / g_AsymmS8QuantizationBase, 0},
-                      {1.0f / g_SymmS8QuantizationBase , 0},
-                      {1.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
+BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
+{
     ActivationDescriptor descriptor;
-    descriptor.m_Function = ActivationFunction::TanH;
+    descriptor.m_Function = ActivationFunction::ReLu;
     descriptor.m_A = 3.5f;
     descriptor.m_B = -10.0f;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestTanHActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
+    TestNetwork(network.get(), shape);
+}
 
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestTanHActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
+BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::SoftReLu;
+    descriptor.m_A = 3.5f;
+    descriptor.m_B = -10.0f;
 
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestTanHActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
+    const TensorShape shape{1U};
+    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestTanHActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
-class TestLeakyReLuActivationQuantization : public TestQuantization
+BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
 {
-public:
-    TestLeakyReLuActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-    : TestQuantization(inputShape, outputShape) {}
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::BoundedReLu;
+    descriptor.m_A = 3.5f;
+    descriptor.m_B = -10.0f;
+
+    const TensorShape shape{1U};
+    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    TestLeakyReLuActivationQuantization(const QuantizerOptions& options,
-                                        const TensorShape& inputShape,
-                                        const TensorShape& outputShape)
-    : TestQuantization(options, inputShape, outputShape) {}
+    TestNetwork(network.get(), shape);
+}
 
-    void VisitActivationLayer(const IConnectableLayer* layer,
-                              const ActivationDescriptor& descriptor,
-                              const char* name = nullptr) override
-    {
-        IgnoreUnused(descriptor, name);
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::TanH;
+    descriptor.m_A = 3.5f;
+    descriptor.m_B = -10.0f;
-        // Based off default static range [-5.0f, 15.0f]
-        TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
-                                     {20.0f / g_AsymmS8QuantizationBase,-64},
-                                     {15.0f / g_SymmS8QuantizationBase , 0},
-                                     {15.0f / g_SymmS16QuantizationBase, 0});
-    }
+    const TensorShape shape{1U};
+    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-protected:
-    // Used by the descendant classes which test layers
-    // that are forwarding their parent layer settings
-    void CheckForwardedQuantizationSettings(const IConnectableLayer* layer)
-    {
-        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-        TestQuantizationParams(info, {20.0f / g_AsymmU8QuantizationBase, 64},
-                                     {20.0f / g_AsymmS8QuantizationBase,-64},
-                                     {15.0f / g_SymmS8QuantizationBase, 0},
-                                     {15.0f / g_SymmS16QuantizationBase, 0});
-    }
-};
+    TestNetwork(network.get(), shape);
+}
 
 BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
 {
@@ -758,176 +780,34 @@ BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestLeakyReLuActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
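The validator removed above pinned its expectations to the quantizer's default static range for LeakyReLu, [-5.0f, 15.0f]. The asymmetric-U8 pair {20.0f / g_AsymmU8QuantizationBase, 64} can be rederived by hand, assuming g_AsymmU8QuantizationBase is the 255 of an 8-bit range as used throughout these tests:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float rangeMin = -5.0f;  // default static range for LeakyReLu
        const float rangeMax = 15.0f;
        const float scale  = (rangeMax - rangeMin) / 255.0f;                   // 20/255
        const int   offset = static_cast<int>(std::lround(-rangeMin / scale)); // 63.75 rounds to 64
        std::printf("scale=%f offset=%d\n", scale, offset);                    // matches {20/255, 64}
    }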
 
 BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
 {
-    class TestEluActivationQuantization : public TestQuantization
-    {
-    public:
-        TestEluActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestEluActivationQuantization(const QuantizerOptions& options,
-                                      const TensorShape& inputShape,
-                                      const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-15.0f, 15.0f]
-            TestQuantizationParams(
-                info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                      {30.0f / g_AsymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::Elu;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestEluActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestEluActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestEluActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestEluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeHardSwishActivation)
 {
-    class TestHardSwishActivationQuantization : public TestQuantization
-    {
-    public:
-        TestHardSwishActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestHardSwishActivationQuantization(const QuantizerOptions& options,
-                                            const TensorShape& inputShape,
-                                            const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-15.0f, 15.0f]
-            TestQuantizationParams(
-                info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                      {30.0f / g_AsymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::HardSwish;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestHardSwishActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
 {
-    class TestBatchNormalizationQuantization : public TestQuantization
-    {
-    public:
-        TestBatchNormalizationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestBatchNormalizationQuantization(const QuantizerOptions& options,
-                                           const TensorShape& inputShape,
-                                           const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                          const BatchNormalizationDescriptor& desc,
-                                          const ConstTensor& mean,
-                                          const ConstTensor& variance,
-                                          const ConstTensor& beta,
-                                          const ConstTensor& gamma,
-                                          const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [-15.0f, 15.0f]
-            TestQuantizationParams(
-                info, {30.0f / g_AsymmU8QuantizationBase, 128},
-                      {30.0f / g_AsymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS8QuantizationBase, 0},
-                      {15.0f / g_SymmS16QuantizationBase, 0});
-
-            // Test constants
-            TestConstantQuantizationParams(mean.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-            TestConstantQuantizationParams(variance.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-            TestConstantQuantizationParams(beta.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-            TestConstantQuantizationParams(gamma.GetInfo(), {3.0f / g_AsymmU8QuantizationBase, 85});
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{3U};
@@ -958,55 +838,11 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
     input0->GetOutputSlot(0).SetTensorInfo(info);
     batchNorm->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions QQsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork();
-    TestBatchNormalizationQuantization validatorQSymmS16(QQsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
 {
-    class TestDepthToSpaceQuantization : public TestQuantization
-    {
-    public:
-        TestDepthToSpaceQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestDepthToSpaceQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitDepthToSpaceLayer(const IConnectableLayer* layer,
-                                            const DepthToSpaceDescriptor& desc,
-                                            const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 };
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0 };
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape inputShape { 1, 2, 2, 4 };
     const TensorShape outputShape{ 1, 4, 4, 1 };
 
@@ -1026,28 +862,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
     depthToSpaceLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQAsymmU8(inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQAsymmS8(qAsymmS8Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQSymmS8(qSymmS8Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QSymmS16 quantization
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestDepthToSpaceQuantization validatorQSymmS16(Qsymm16Options, inputShape, outputShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), inputShape, outputShape);
 }
 
 BOOST_AUTO_TEST_CASE(OverrideInputRangeEmptyNetwork)
 {
@@ -1058,8 +873,8 @@ BOOST_AUTO_TEST_CASE(OverrideInputRangeEmptyNetwork)
     Network network; // Empty network
     auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
 
-    OverrideInputRangeVisitor overrideInputRangeVisitor(ranges, 0, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitor);
+    OverrideInputRangeStrategy overrideInputRangeStrategy(ranges, 0, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy);
 
     BOOST_CHECK(ranges.IsEmpty()); // Check that the map of ranges remained untouched
 }
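In this hunk and the following ones, OverrideInputRangeVisitor/VisitLayers gives way to OverrideInputRangeStrategy/ApplyStrategyToLayers. A plausible shape for the traversal helper, assuming it mirrors the old VisitLayers loop with each layer describing itself back to the strategy (the body below is an assumption, not quoted from this patch):

    // Assumed sketch of the strategy-based counterpart of VisitLayers.
    template <typename LayerContainer>
    void ApplyStrategyToLayers(const LayerContainer& layerContainer, armnn::IStrategy& strategy)
    {
        for (auto layer : layerContainer)
        {
            layer->ExecuteStrategy(strategy); // layer reports its type, descriptor and constants
        }
    }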
@@ -1073,8 +888,8 @@
     network.AddAdditionLayer(); // Network with no input layers
     auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
 
-    OverrideInputRangeVisitor overrideInputRangeVisitor(ranges, 0, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitor);
+    OverrideInputRangeStrategy overrideInputRangeStrategy(ranges, 0, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy);
 
     BOOST_CHECK(ranges.IsEmpty()); // Check that the map of ranges remained untouched
 }
@@ -1107,15 +922,15 @@ BOOST_AUTO_TEST_CASE(OverrideInputRangeInputLayers)
     auto inputLayers = network.GetGraph().GetInputLayers(); // List of input layers
 
     // Trying to override the input range for the input layer with binding id 3 (does not exist in the network)
-    OverrideInputRangeVisitor overrideInputRangeVisitorLayer3(ranges, 3, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitorLayer3);
+    OverrideInputRangeStrategy overrideInputRangeStrategy3(ranges, 3, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy3);
 
     // Check that the map of ranges remained untouched
     BOOST_CHECK(ranges.IsEmpty());
 
     // Override the input range for the input layer with binding id 1
-    OverrideInputRangeVisitor overrideInputRangeVisitorLayer1(ranges, 1, minMaxRange);
-    VisitLayers(inputLayers, overrideInputRangeVisitorLayer1);
+    OverrideInputRangeStrategy overrideInputRangeStrategy1(ranges, 1, minMaxRange);
+    ApplyStrategyToLayers(inputLayers, overrideInputRangeStrategy1);
 
     // Check that the map of ranges has been populated
     BOOST_CHECK(!ranges.IsEmpty());
@@ -1170,80 +985,14 @@ INetworkPtr CreateNetworkWithFullyConnectedLayer(const bool biasEnabled,
 
 void ValidateFullyConnectedLayer(const bool biasEnabled)
 {
-    class TestFullyConnectedQuantization : public TestQuantization
-    {
-    public:
-        TestFullyConnectedQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestFullyConnectedQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitFullyConnectedLayer(const IConnectableLayer* layer,
-                                      const FullyConnectedDescriptor& desc,
-                                      const ConstTensor& weights,
-                                      const Optional<ConstTensor>& biases,
-                                      const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     const TensorShape shape{3U};
     INetworkPtr network = CreateNetworkWithFullyConnectedLayer(biasEnabled, shape, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestFullyConnectedQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestFullyConnectedQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestFullyConnectedQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestFullyConnectedQuantization validatorQSymmS16(Qsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeFill)
 {
-    class TestFillQuantization : public TestQuantization
-    {
-    public:
-        TestFillQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestFillQuantization(const QuantizerOptions& options,
-                             const TensorShape& inputShape,
-                             const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitFillLayer(const IConnectableLayer* layer,
-                                    const FillDescriptor& desc,
-                                    const char* name = nullptr)
-        {
-            IgnoreUnused(desc, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape tensorShape{ 1U };
     const TensorInfo tensorInfo(tensorShape, DataType::Float32);
 
@@ -1262,28 +1011,7 @@ BOOST_AUTO_TEST_CASE(QuantizeFill)
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     fillLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestFillQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestFillQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestFillQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QuantisedSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestFillQuantization validatorQSymmS16(qSymmS16options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), tensorShape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeFullyConnected)
 {
@@ -1298,28 +1026,6 @@ BOOST_AUTO_TEST_CASE(QuantizeFullyConnectedBiasEnabled)
 
 void TestQuantizeConvolution2d(bool useBiases)
 {
-    class TestConv2dQuantization : public TestQuantization
-    {
-    public:
-        TestConv2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestConv2dQuantization(const QuantizerOptions& options,
-                               const TensorShape& inputShape,
-                               const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitConvolution2dLayer(const IConnectableLayer *layer,
-                                     const Convolution2dDescriptor& convolution2dDescriptor,
-                                     const ConstTensor& weights,
-                                     const Optional<ConstTensor>& biases,
-                                     const char *name = nullptr) override
-        {
-            IgnoreUnused(convolution2dDescriptor, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     TensorShape shape{3U};
@@ -1352,24 +1058,7 @@ void TestQuantizeConvolution2d(bool useBiases)
     input0->GetOutputSlot(0).SetTensorInfo(info);
     conv2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestConv2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestConv2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestConv2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestConv2dQuantization validatorQSymmS16(Qsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
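Both convolution validators deleted in this region delegate to TestQuantizationOnLayersWithBiases, which exercises the usual bias convention: biases are stored as Signed32 with a zero offset, and the bias scale is the product of the input scale and the weight scale. Expressed as a check (a sketch of the convention, not the helper's literal code):

    // Illustrative check of the bias quantization convention.
    void CheckBiasQuantization(const armnn::TensorInfo& input,
                               const armnn::TensorInfo& weights,
                               const armnn::TensorInfo& biases)
    {
        BOOST_CHECK(biases.GetDataType() == armnn::DataType::Signed32);
        BOOST_CHECK(biases.GetQuantizationOffset() == 0);
        BOOST_TEST(biases.GetQuantizationScale() ==
                   input.GetQuantizationScale() * weights.GetQuantizationScale(),
                   boost::test_tools::tolerance(0.000001f));
    }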
 
 BOOST_AUTO_TEST_CASE(QuantizeConvolution2d)
@@ -1384,28 +1073,6 @@ BOOST_AUTO_TEST_CASE(QuantizeConvolution2dWithBiases)
 
 void TestQuantizeDepthwiseConvolution2d(bool useBiases)
 {
-    class TestDepthwiseConv2dQuantization : public TestQuantization
-    {
-    public:
-        TestDepthwiseConv2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestDepthwiseConv2dQuantization(const QuantizerOptions& options,
-                                        const TensorShape& inputShape,
-                                        const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
-                                              const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-                                              const ConstTensor& weights,
-                                              const Optional<ConstTensor>& biases,
-                                              const char *name = nullptr) override
-        {
-            IgnoreUnused(convolution2dDescriptor, name);
-            TestQuantizationOnLayersWithBiases(layer, weights, biases);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     TensorShape shape{3U};
@@ -1438,24 +1105,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
     input0->GetOutputSlot(0).SetTensorInfo(info);
     depthwiseConv2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
-    TestDepthwiseConv2dQuantization validatorQSymmS16(Qsymm16Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeDepthwiseConvolution2d)
 {
@@ -1470,35 +1120,8 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthwiseConvolution2dWithBiases)
 
 BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
 {
-    class TestInstanceNormalizationQuantization : public TestQuantization
-    {
-    public:
-        TestInstanceNormalizationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestInstanceNormalizationQuantization(const QuantizerOptions& options,
-                                              const TensorShape& inputShape,
-                                              const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        virtual void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
                                                     const InstanceNormalizationDescriptor& descriptor,
-                                                     const char* name = nullptr)
-        {
-            IgnoreUnused(descriptor, name);
-            const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
-    const TensorShape tensorShape{ 1, 4, 4, 1 };
-    const TensorInfo tensorInfo(tensorShape, DataType::Float32);
+    const TensorShape shape{ 1, 4, 4, 1 };
+    const TensorInfo tensorInfo(shape, DataType::Float32);
 
     INetworkPtr network = INetwork::Create();
 
@@ -1512,59 +1135,11 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     instanceNormLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    //test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QSymmS16 quantization
-    const QuantizerOptions qSymmS16Options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16Options)->ExportNetwork();
-    TestInstanceNormalizationQuantization validatorQSymmS16(qSymmS16Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
 {
-    class TestLogSoftmaxQuantization : public TestQuantization
-    {
-    public:
-        TestLogSoftmaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestLogSoftmaxQuantization(const QuantizerOptions& options,
-                                   const TensorShape& inputShape,
-                                   const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitLogSoftmaxLayer(const IConnectableLayer* layer,
-                                  const SoftmaxDescriptor& descriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
-            const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0};
-            const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0};
-            const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 };
-
-            TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params);
-        }
-    };
-
     const TensorShape tensorShape{ 1U };
     const TensorInfo tensorInfo(tensorShape, DataType::Float32);
 
@@ -1583,28 +1158,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     logSoftmaxLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // test QAsymmU8 quantization
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQAsymmU8(tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    // test QAsymmS8 quantization
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    // test QuantisedSymmS16 quantization
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestLogSoftmaxQuantization validatorQSymmS16(qSymmS16options, tensorShape, tensorShape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), tensorShape);
 }
 
 INetworkPtr CreateNetworkWithSoftmaxLayer(const SoftmaxDescriptor& descriptor, const TensorShape& shape)
 {
     INetworkPtr network = INetwork::Create();
@@ -1630,57 +1184,13 @@ INetworkPtr CreateNetworkWithSoftmaxLayer(const SoftmaxDescriptor& descriptor, c
 
 BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
 {
-    class TestSoftmaxQuantization : public TestQuantization
-    {
-    public:
-        TestSoftmaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestQuantization(inputShape, outputShape) {}
-
-        TestSoftmaxQuantization(const QuantizerOptions& options,
-                                const TensorShape& inputShape,
-                                const TensorShape& outputShape)
-        : TestQuantization(options, inputShape, outputShape) {}
-
-        void VisitSoftmaxLayer(const IConnectableLayer* layer,
-                               const SoftmaxDescriptor& descriptor,
-                               const char* name = nullptr) override
-        {
-            IgnoreUnused(descriptor, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off default static range [0.0f, 1.0f]
-            TestQuantizationParams(info, {1.0f / g_AsymmU8QuantizationBase, 0},
-                                         {1.0f / g_AsymmS8QuantizationBase, -128},
-                                         {1.0f / g_SymmS8QuantizationBase, 0},
-                                         {1.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
     SoftmaxDescriptor descriptor;
     descriptor.m_Beta = 1.0f;
 
     const TensorShape shape{1U};
     INetworkPtr network = CreateNetworkWithSoftmaxLayer(descriptor, shape);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSoftmaxQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSoftmaxQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    // test QSymmS8 quantization
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSoftmaxQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSoftmaxQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeStandIn)
 {
@@ -1763,26 +1273,6 @@ void CompleteLeakyReluNetwork(INetwork* network,
 
 BOOST_AUTO_TEST_CASE(QuantizePermute)
 {
-    class TestPermuteQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestPermuteQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestPermuteQuantization(const QuantizerOptions& options,
-                                const TensorShape& inputShape,
-                                const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitPermuteLayer(const IConnectableLayer* layer,
-                               const PermuteDescriptor& desc,
-                               const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -1796,48 +1286,11 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
 
     CompleteLeakyReluNetwork(network.get(), activation, permute, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestPermuteQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestPermuteQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestPermuteQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestPermuteQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
 {
-    class TestSpaceToBatchQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestSpaceToBatchQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestSpaceToBatchQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                      const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                      const char* name = nullptr) override
-        {
-            IgnoreUnused(spaceToBatchNdDescriptor, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{1U};
@@ -1851,54 +1304,11 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
 
     CompleteLeakyReluNetwork(network.get(), activation, spaceToBatch, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSpaceToBatchQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth)
 {
-    class TestSpaceToDepthQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestSpaceToDepthQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape)
-        {}
-
-        TestSpaceToDepthQuantization(const QuantizerOptions& options,
-                                     const TensorShape& inputShape,
-                                     const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape)
-        {}
-
-        void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
-                                    const SpaceToDepthDescriptor&,
-                                    const char* = nullptr) override
-        {
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-            TestQuantizationParams(info,
                                   { 30.0f / g_AsymmU8QuantizationBase, 128 },
-                                   { 30.0f / g_AsymmS8QuantizationBase, 0 },
-                                   { 15.0f / g_SymmS8QuantizationBase, 0 },
-                                   { 15.0f / g_SymmS16QuantizationBase, 0 });
-        }
-    };
-
     INetworkPtr network = INetwork::Create();
 
     const TensorShape shape{ 1u };
@@ -1909,48 +1319,11 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth)
 
     CompleteLeakyReluNetwork(network.get(), activation, spaceToDepth, info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestSpaceToDepthQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizePooling2d)
 {
-    class TestPooling2dQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestPooling2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestPooling2dQuantization(const QuantizerOptions& options,
-                                  const TensorShape& inputShape,
-                                  const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitPooling2dLayer(const IConnectableLayer* layer,
-                                 const Pooling2dDescriptor& desc,
-                                 const char* name = nullptr) override
-        {
-            IgnoreUnused(desc, name);
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
     auto network = INetwork::Create();
 
     TensorShape shape{1U};
@@ -1978,54 +1351,11 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
     activation->GetOutputSlot(0).SetTensorInfo(info);
     pooling2d->GetOutputSlot(0).SetTensorInfo(info);
 
-    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestPooling2dQuantization validatorQAsymmU8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
-
-    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
-    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
-    TestPooling2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
-
-    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
-    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
-    TestPooling2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
-
-    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
-    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
-    TestPooling2dQuantization validatorQSymmS16(qSymmS16options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+    TestNetwork(network.get(), shape);
 }
 
 BOOST_AUTO_TEST_CASE(QuantizeConstant)
 {
-    class TestConstantQuantization : public TestAdditionQuantization
-    {
-    public:
-        TestConstantQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestAdditionQuantization(inputShape, outputShape) {}
-
-        TestConstantQuantization(const QuantizerOptions& options,
-                                 const TensorShape& inputShape,
-                                 const TensorShape& outputShape)
-        : TestAdditionQuantization(options, inputShape, outputShape) {}
-
-        void VisitConstantLayer(const IConnectableLayer* layer,
-                                const ConstTensor& input,
-                                const char* name = nullptr) override
-        {
-            IgnoreUnused(input, name);
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
-            // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
-            TestQuantizationParams(info, {8.0f / g_AsymmU8QuantizationBase, 64},
-                                         {8.0f / g_AsymmS8QuantizationBase, -64},
-                                         {6.0f / g_SymmS8QuantizationBase, 0},
-                                         {6.0f / g_SymmS16QuantizationBase, 0});
-        }
-    };
-
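The constant validator removed above derived its parameters from the test tensor's value range, [-2.0f, 6.0f]. The signed asymmetric pair {8.0f / g_AsymmS8QuantizationBase, -64} follows from mapping that range onto [-128, 127]; the unsigned zero point of 64 simply shifts down by 128 (assuming the asymmetric 8-bit bases are 255, as elsewhere in these tests):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float rangeMin = -2.0f;  // smallest value in the constant tensor
        const float rangeMax = 6.0f;
        const float scale = (rangeMax - rangeMin) / 255.0f;                              // 8/255
        const int unsignedOffset = static_cast<int>(std::lround(-rangeMin / scale));     // 64
        const int signedOffset   = unsignedOffset - 128;                                 // -64
        std::printf("scale=%f u8=%d s8=%d\n", scale, unsignedOffset, signedOffset);
    }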
VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } BOOST_AUTO_TEST_CASE(QuantizeArgMinMax) { - class TestArgMinMaxQuantization : public TestQuantization - { - public: - TestArgMinMaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape) : - TestQuantization(inputShape, outputShape) {} - - TestArgMinMaxQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) : - TestQuantization(options, inputShape, outputShape) - {} - - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(layer, id, name); - } - - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(layer, id, name); - } - void VisitArgMinMaxLayer(const IConnectableLayer* layer, - const ArgMinMaxDescriptor& argMinMaxDescriptor, - const char* name = nullptr) override - { - IgnoreUnused(argMinMaxDescriptor, name); - TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); - - TestQuantizationParams(outputInfo, - { 30.0f / g_AsymmU8QuantizationBase, 128 }, - { 30.0f / g_AsymmS8QuantizationBase, 0}, - { 15.0f / g_SymmS8QuantizationBase, 0}, - { 15.0f / g_SymmS16QuantizationBase, 0 }); - } - }; - INetworkPtr network = INetwork::Create(); const TensorShape inputShape{ 1, 1, 1, 5 }; @@ -2139,55 +1412,11 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax) input->GetOutputSlot(0).SetTensorInfo(inputInfo); argMinMaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestArgMinMaxQuantization validatorQAsymmU8(inputShape, outputShape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestArgMinMaxQuantization validatorQAsymmS8(qAsymmS8Options, inputShape, outputShape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestArgMinMaxQuantization validatorQSymmS8(qSymmS8Options, inputShape, outputShape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestArgMinMaxQuantization validatorQSymmS16(qSymmS16options, inputShape, outputShape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), inputShape, outputShape); } BOOST_AUTO_TEST_CASE(QuantizeComparison) { - class TestComparisonQuantization : public TestQuantization - { - public: - TestComparisonQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(inputShape, outputShape) {} - - TestComparisonQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestQuantization(options, inputShape, outputShape) {} - - void VisitComparisonLayer(const IConnectableLayer* layer, - const ComparisonDescriptor& descriptor, - const char* name = 
nullptr) override - { - IgnoreUnused(descriptor, name); - TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); - - const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 }; - const OffsetScalePair qAsymmS8Params { 30.0f / g_AsymmS8QuantizationBase, 0}; - const OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0}; - const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 }; - - TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); - } - }; - const TensorShape tensorShape{ 1u }; const TensorInfo tensorInfo(tensorShape, DataType::Float32); @@ -2207,28 +1436,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison) inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo); comparisonLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); - // test QAsymmU8 quantization - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestComparisonQuantization validatorQAsymmU8(tensorShape, tensorShape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - // test QAsymmS8 quantization - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestComparisonQuantization validatorQAsymmS8(qAsymmS8Options, tensorShape, tensorShape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - // test QSymmS8 quantization - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestComparisonQuantization validatorQSymmS8(qSymmS8Options, tensorShape, tensorShape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - // test QuantisedSymmS16 quantization - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestComparisonQuantization validatorQSymmS16(qSymmS16options, tensorShape, tensorShape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), tensorShape); } BOOST_AUTO_TEST_CASE(QuantizeConcat) @@ -2244,38 +1452,42 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat) const TensorShape& outputShape) : TestQuantization(options, inputShape, outputShape) {} - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(layer, id, name); - } - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(layer, id, name); - } - void VisitConcatLayer(const IConnectableLayer* layer, - const OriginsDescriptor& originsDescriptor, - const char* name = nullptr) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id) override { - IgnoreUnused(originsDescriptor, name); - TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); - TestQuantizationParams( - outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65}, + IgnoreUnused(name, constants, id, descriptor); + + switch (layer->GetType()) + { + case armnn::LayerType::Input : + break; + case armnn::LayerType::Output : + break; + case armnn::LayerType::Concat : + { + TensorInfo 
outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); + TestQuantizationParams( + outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65}, {60.8f / g_SymmS8QuantizationBase, -63}, {45.3f / g_SymmS8QuantizationBase, 0}, {45.3f / g_SymmS16QuantizationBase, 0}); - TensorInfo inputInfo0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); - TensorInfo inputInfo1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo(); - TensorInfo inputInfo2 = layer->GetInputSlot(2).GetConnection()->GetTensorInfo(); + TensorInfo inputInfo0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + TensorInfo inputInfo1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo(); + TensorInfo inputInfo2 = layer->GetInputSlot(2).GetConnection()->GetTensorInfo(); - TestDifferentQuantizationScale(inputInfo0, inputInfo1); - TestDifferentQuantizationScale(inputInfo0, inputInfo2); - TestDifferentQuantizationScale(inputInfo1, inputInfo2); - TestDifferentQuantizationScale(inputInfo0, outputInfo); + TestDifferentQuantizationScale(inputInfo0, inputInfo1); + TestDifferentQuantizationScale(inputInfo0, inputInfo2); + TestDifferentQuantizationScale(inputInfo1, inputInfo2); + TestDifferentQuantizationScale(inputInfo0, outputInfo); + break; + } + default: + {} + } } }; @@ -2341,26 +1553,6 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat) BOOST_AUTO_TEST_CASE(QuantizeReshape) { - class TestReshapeQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestReshapeQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(inputShape, outputShape) {} - - TestReshapeQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {} - - virtual void VisitReshapeLayer(const IConnectableLayer* layer, - const ReshapeDescriptor& reshapeDescriptor, - const char* name = nullptr) override - { - IgnoreUnused(reshapeDescriptor, name); - CheckForwardedQuantizationSettings(layer); - } - }; - INetworkPtr network = INetwork::Create(); const TensorShape shape{1U}; @@ -2374,48 +1566,11 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape) CompleteLeakyReluNetwork(network.get(), activation, reshape, info); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestReshapeQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestReshapeQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestReshapeQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestReshapeQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } 
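// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original patch): the
// per-data-type validation blocks deleted throughout these tests all followed
// one pattern, which the shared TestNetwork helper now folds together.
// Assuming the TestQuantization fixture and VisitLayersTopologically helper
// used in this file, and that a default-constructed QuantizerOptions selects
// QAsymmU8, the consolidated flow is roughly:
//
// void TestNetworkSketch(INetwork* network, const TensorShape& inShape, const TensorShape& outShape)
// {
//     const QuantizerOptions options[] = { QuantizerOptions(DataType::QAsymmU8),
//                                          QuantizerOptions(DataType::QAsymmS8),
//                                          QuantizerOptions(DataType::QSymmS8),
//                                          QuantizerOptions(DataType::QSymmS16) };
//     for (const QuantizerOptions& option : options)
//     {
//         // One quantize-then-validate pass per target data type.
//         INetworkPtr quantizedNetwork = INetworkQuantizer::Create(network, option)->ExportNetwork();
//         TestQuantization validator(option, inShape, outShape);
//         VisitLayersTopologically(quantizedNetwork.get(), validator);
//     }
// }
// ---------------------------------------------------------------------------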
BOOST_AUTO_TEST_CASE(QuantizeSplitter) { - class TestSplitterQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestSplitterQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(inputShape, outputShape) {} - - TestSplitterQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {} - - virtual void VisitSplitterLayer(const IConnectableLayer* layer, - const SplitterDescriptor& desc, - const char* name = nullptr) - { - IgnoreUnused(desc, name); - CheckForwardedQuantizationSettings(layer); - } - }; - INetworkPtr network = INetwork::Create(); const TensorShape shape{3U}; @@ -2428,50 +1583,11 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter) IConnectableLayer* splitter = network->AddSplitterLayer(splitterDesc); CompleteLeakyReluNetwork(network.get(), activation, splitter, info); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestSplitterQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestSplitterQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestSplitterQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestSplitterQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } BOOST_AUTO_TEST_CASE(QuantizeResize) { - class TestResizeQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestResizeQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(inputShape, outputShape) - {} - - TestResizeQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) - {} - - void VisitResizeLayer(const IConnectableLayer* layer, - const ResizeDescriptor& resizeDescriptor, - const char* name = nullptr) override - { - IgnoreUnused(resizeDescriptor, name); - CheckForwardedQuantizationSettings(layer); - } - }; - INetworkPtr network = INetwork::Create(); const TensorShape shape{1U}; @@ -2487,48 +1603,11 @@ BOOST_AUTO_TEST_CASE(QuantizeResize) CompleteLeakyReluNetwork(network.get(), activation, resizeLayer, info); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestResizeQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = 
INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestResizeQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestResizeQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestResizeQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } BOOST_AUTO_TEST_CASE(QuantizeStridedSlice) { - class TestStridedSliceQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestStridedSliceQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(inputShape, outputShape) {} - - TestStridedSliceQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {} - - virtual void VisitStridedSliceLayer(const IConnectableLayer* layer, - const StridedSliceDescriptor& desc, - const char* name = nullptr) - { - IgnoreUnused(desc, name); - CheckForwardedQuantizationSettings(layer); - } - }; - INetworkPtr network = INetwork::Create(); const TensorShape shape{3U}; @@ -2542,48 +1621,11 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice) CompleteLeakyReluNetwork(network.get(), activation, stridedSlice, info); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestStridedSliceQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestStridedSliceQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestStridedSliceQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestStridedSliceQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace) { - class TestBatchToSpaceQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestBatchToSpaceQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(inputShape, outputShape) {} - - TestBatchToSpaceQuantization(const QuantizerOptions& options, - 
const TensorShape& inputShape, - const TensorShape& outputShape) - : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {} - - void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, - const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, - const char* name = nullptr) override - { - IgnoreUnused(batchToSpaceNdDescriptor, name); - CheckForwardedQuantizationSettings(layer); - } - }; - INetworkPtr network = INetwork::Create(); const TensorShape shape{1U}; @@ -2597,24 +1639,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace) CompleteLeakyReluNetwork(network.get(), activation, batchToSpace, info); - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestBatchToSpaceQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestBatchToSpaceQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestBatchToSpaceQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestBatchToSpaceQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } BOOST_AUTO_TEST_CASE(QuantizePrelu) @@ -2637,52 +1662,59 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu) , m_AlphaShape(alphaShape) {} - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id) override { - IgnoreUnused(id, name); - const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); + IgnoreUnused(name, constants, id, descriptor); - switch (id) + switch (layer->GetType()) { - case 0: // Input - BOOST_TEST(m_InputShape == info.GetShape()); - break; - case 1: // Alpha - BOOST_TEST(m_AlphaShape == info.GetShape()); - break; - default: - throw InvalidArgumentException("Invalid layer binding id for PReLU layer"); + case armnn::LayerType::Input : + { + const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); + + switch (id) + { + case 0: // Input + BOOST_TEST(m_InputShape == info.GetShape()); + break; + case 1: // Alpha + BOOST_TEST(m_AlphaShape == info.GetShape()); + break; + default: + throw InvalidArgumentException("Invalid layer binding id for PReLU layer"); + } + + // Based off current default [-15.0f, 15.0f] + TestQuantizationParams(info, + { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8 + { 30.0f / g_AsymmS8QuantizationBase, 0}, // QASymmS8 + { 15.0f / g_SymmS8QuantizationBase, 0}, // QSymmS8 + { 15.0f / g_SymmS16QuantizationBase, 0 }); // QSymmS16 + break; + } + case armnn::LayerType::Output : + { + const TensorInfo& info = 
layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + BOOST_TEST(m_OutputShape == info.GetShape()); + break; + } + case armnn::LayerType::Prelu : + { + const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); + TestQuantizationParams(info, + { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8 + { 30.0f / g_AsymmS8QuantizationBase, 0}, // QAsymmS8 + { 15.0f / g_SymmS8QuantizationBase, 0}, // QSymmS8 + { 15.0f / g_SymmS16QuantizationBase, 0 }); // QSymmS16 + break; + } + default: + {} } - - // Based off current default [-15.0f, 15.0f] - TestQuantizationParams(info, - { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8 - { 30.0f / g_AsymmS8QuantizationBase, 0}, // QASymmS8 - { 15.0f / g_SymmS8QuantizationBase, 0}, // QSymmS8 - { 15.0f / g_SymmS16QuantizationBase, 0 }); // QSymmS16 - } - - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(id, name); - const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); - BOOST_TEST(m_OutputShape == info.GetShape()); - } - - void VisitPreluLayer(const IConnectableLayer* layer, - const char* name = nullptr) override - { - IgnoreUnused(name); - const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); - TestQuantizationParams(info, - { 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8 - { 30.0f / g_AsymmS8QuantizationBase, 0}, // QAsymmS8 - { 15.0f / g_SymmS8QuantizationBase, 0}, // QSymmS8 - { 15.0f / g_SymmS16QuantizationBase, 0 }); // QSymmS16 } private: @@ -2740,30 +1772,6 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu) void TestQuantizeTransposeConvolution2d(bool useBiases) { - class TestTransposeConvolution2dQuantization : public TestQuantization - { - public: - TestTransposeConvolution2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape) : - TestQuantization(inputShape, outputShape) - {} - - TestTransposeConvolution2dQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) : - TestQuantization(options, inputShape, outputShape) - {} - - void VisitTransposeConvolution2dLayer(const IConnectableLayer *layer, - const TransposeConvolution2dDescriptor& descriptor, - const ConstTensor& weights, - const Optional& biases, - const char *name = nullptr) override - { - IgnoreUnused(descriptor, name); - TestQuantizationOnLayersWithBiases(layer, weights, biases); - } - }; - INetworkPtr network = INetwork::Create(); TensorShape shape{ 3 }; @@ -2794,28 +1802,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases) input->GetOutputSlot(0).SetTensorInfo(info); transposeConv2d->GetOutputSlot(0).SetTensorInfo(info); - // test QAsymmU8 quantization - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestTransposeConvolution2dQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - //test QAsymmS8 quantization - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestTransposeConvolution2dQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - // test QSymmS8 quantization - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), 
qSymmS8Options)->ExportNetwork(); - TestTransposeConvolution2dQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - // test QSymmS16 quantization - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestTransposeConvolution2dQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } BOOST_AUTO_TEST_CASE(QuantizeTransposeConvolution2d) @@ -2835,38 +1822,45 @@ BOOST_AUTO_TEST_CASE(QuantizeStack) public: TestStackQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(inputShape, outputShape) {} + : TestQuantization(inputShape, outputShape) {} TestStackQuantization(const QuantizerOptions& options, const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(options, inputShape, outputShape) {} + : TestQuantization(options, inputShape, outputShape) {} - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(layer, id, name); - } - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id) override { - IgnoreUnused(layer, id, name); - } + IgnoreUnused(name, constants, id, descriptor); - void VisitStackLayer(const IConnectableLayer* layer, - const StackDescriptor& descriptor, - const char* name = nullptr) override - { - IgnoreUnused(descriptor, name); - TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); - - TestQuantizationParams(outputInfo, - { 30.0f / g_AsymmU8QuantizationBase, 128 }, - { 30.0f / g_AsymmS8QuantizationBase, 0}, - { 15.0f / g_SymmS8QuantizationBase, 0}, - { 15.0f / g_SymmS16QuantizationBase, 0 }); + switch (layer->GetType()) + { + case armnn::LayerType::Input : + { + break; + } + case armnn::LayerType::Output : + { + break; + } + case armnn::LayerType::Stack : + { + TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); + + TestQuantizationParams(outputInfo, + { 30.0f / g_AsymmU8QuantizationBase, 128 }, + { 30.0f / g_AsymmS8QuantizationBase, 0}, + { 15.0f / g_SymmS8QuantizationBase, 0}, + { 15.0f / g_SymmS16QuantizationBase, 0 }); + break; + } + default: + {} + } } }; @@ -2909,35 +1903,6 @@ BOOST_AUTO_TEST_CASE(QuantizeStack) BOOST_AUTO_TEST_CASE(QuantizeSlice) { - class TestSliceQuantization : public TestQuantization - { - public: - TestSliceQuantization(const TensorShape& inputShape, const TensorShape& outputShape) - : TestQuantization(inputShape, outputShape) - {} - - TestSliceQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) - : TestQuantization(options, inputShape, outputShape) - {} - - virtual void VisitSliceLayer(const IConnectableLayer* layer, - const SliceDescriptor& desc, - const char* name = nullptr) - { - IgnoreUnused(desc, name); - const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); - - const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 }; - const OffsetScalePair qAsymmS8Params{ 30.0f / g_AsymmS8QuantizationBase, 0 }; - const 
OffsetScalePair qSymmS8Params { 15.0f / g_SymmS8QuantizationBase, 0 }; - const OffsetScalePair qSymmS16Params{ 15.0f / g_SymmS16QuantizationBase, 0 }; - - TestQuantizationParams(info, qAsymmU8Params, qAsymmS8Params, qSymmS8Params, qSymmS16Params); - } - }; - TensorShape shape{ 3 }; TensorInfo info(shape, DataType::Float32); @@ -2953,28 +1918,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice) inputLayer->GetOutputSlot(0).SetTensorInfo(info); sliceLayer->GetOutputSlot(0).SetTensorInfo(info); - // test QAsymmU8 quantization - INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestSliceQuantization validatorQAsymmU8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8); - - // test QASymmS8 quantization - const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8); - INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork(); - TestSliceQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8); - - // test QSymmS8 quantization - const QuantizerOptions qSymmS8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork(); - TestSliceQuantization validatorQSymmS8(qSymmS8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8); - - // test QSymmS16 quantization - const QuantizerOptions qSymmS16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork(); - TestSliceQuantization validatorQSymmS16(qSymmS16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16); + TestNetwork(network.get(), shape); } std::vector<uint8_t> SetupQuantize(float value) @@ -3002,50 +1946,55 @@ BOOST_AUTO_TEST_CASE(QuantizeNegativeInf) { BOOST_CHECK_EQUAL(SetupQuantize(-1 * std::numeric_limits<float>::infinity())[0], 0); } -class TestPreserveType : public TestAdditionQuantization +class TestPreserveType : public TestQuantization { public: TestPreserveType(const QuantizerOptions& options, const DataType& dataType, const TensorShape& inputShape, const TensorShape& outputShape) - : TestAdditionQuantization(options, inputShape, outputShape) + : TestQuantization(options, inputShape, outputShape) , m_DataType(dataType) , m_VisitedQuantizeLayer(false) , m_VisitedDequantizeLayer(false) {} - void VisitInputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(id, name); - const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); - BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType)); - BOOST_TEST(m_InputShape == info.GetShape()); - } - - void VisitOutputLayer(const IConnectableLayer* layer, - LayerBindingId id, - const char* name = nullptr) override - { - IgnoreUnused(id, name); - const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); - BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType)); - BOOST_TEST(m_OutputShape == info.GetShape()); - } - - void VisitQuantizeLayer(const IConnectableLayer* layer, - const char* name = nullptr) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const BaseDescriptor& descriptor, + const std::vector<armnn::ConstTensor>& constants, + const char* name, + const armnn::LayerBindingId id) override { -
IgnoreUnused(layer, name); - m_VisitedQuantizeLayer = true; - } + IgnoreUnused(name, constants, id, descriptor); - void VisitDequantizeLayer(const IConnectableLayer* layer, - const char* name = nullptr) override - { - IgnoreUnused(layer, name); - m_VisitedDequantizeLayer = true; + switch (layer->GetType()) + { + case armnn::LayerType::Input : + { + const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo(); + BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType)); + BOOST_TEST(m_InputShape == info.GetShape()); + break; + } + case armnn::LayerType::Output : + { + const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType)); + BOOST_TEST(m_OutputShape == info.GetShape()); + break; + } + case armnn::LayerType::Quantize : + { + m_VisitedQuantizeLayer = true; + break; + } + case armnn::LayerType::Dequantize : + { + m_VisitedDequantizeLayer = true; + break; + } + default: + {} + } } void CheckQuantizeDequantizeLayerVisited(bool expected) @@ -3119,39 +2068,52 @@ BOOST_AUTO_TEST_CASE(PreserveTypeQsymm16) BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) { - class TestConnectionPreservation : public LayerVisitorBase<VisitorNoThrowPolicy> + class TestConnectionPreservation : public IStrategy { public: TestConnectionPreservation(const Graph& graph) - : LayerVisitorBase<VisitorNoThrowPolicy>() - , m_Graph(graph) + : m_Graph(graph) {} - void VisitAdditionLayer(const IConnectableLayer* layer, const char*) override - { - CheckLayerName(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), "reLU1"); - CheckLayerName(layer->GetInputSlot(1).GetConnection()->GetOwningLayerGuid(), "reLU2"); - } + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const BaseDescriptor& descriptor, + const std::vector<armnn::ConstTensor>& constants, + const char* name, + const armnn::LayerBindingId id) override + { + IgnoreUnused(name, constants, id, descriptor); - void CheckLayerName(LayerGuid guid, std::string expectedName) + switch (layer->GetType()) { - bool guidFound = false; - for (Layer* layer : m_Graph) + case armnn::LayerType::Addition : { - if (layer->GetGuid() == guid) - { - BOOST_CHECK_EQUAL(layer->GetName(), expectedName.c_str()); - guidFound = true; - break; - } + CheckLayerName(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), "reLU1"); + CheckLayerName(layer->GetInputSlot(1).GetConnection()->GetOwningLayerGuid(), "reLU2"); + break; } - if (!guidFound) + default: + {} + } + } + + void CheckLayerName(LayerGuid guid, std::string expectedName) + { + bool guidFound = false; + for (Layer* layer : m_Graph) + { + if (layer->GetGuid() == guid) { - BOOST_FAIL("No layer matching the GUID was found"); + BOOST_CHECK_EQUAL(layer->GetName(), expectedName.c_str()); + guidFound = true; + break; } } - - private: + if (!guidFound) + { + BOOST_FAIL("No layer matching the GUID was found"); + } + } + private: Graph m_Graph; }; @@ -3177,8 +2139,8 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) reLULayer2->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32)); addLayer1->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32)); - TestConnectionPreservation visitor1(PolymorphicDowncast<Network*>(network.get())->GetGraph()); - VisitLayersTopologically(network.get(), visitor1); + TestConnectionPreservation strategy1(PolymorphicDowncast<Network*>(network.get())->GetGraph()); + VisitLayersTopologically(network.get(), strategy1); armnn::INetworkQuantizerPtr
quantizer = armnn::INetworkQuantizer::Create(network.get()); @@ -3193,8 +2155,8 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) INetworkPtr quantNetwork = quantizer->ExportNetwork(); - TestConnectionPreservation visitor2(PolymorphicDowncast<Network*>(quantNetwork.get())->GetGraph()); - VisitLayersTopologically(quantNetwork.get(), visitor2); + TestConnectionPreservation strategy2(PolymorphicDowncast<Network*>(quantNetwork.get())->GetGraph()); + VisitLayersTopologically(quantNetwork.get(), strategy2); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnDeserializer/test/DeserializeReduceSum.cpp b/src/armnnDeserializer/test/DeserializeReduceSum.cpp index d88613e593..326560ff22 100644 --- a/src/armnnDeserializer/test/DeserializeReduceSum.cpp +++ b/src/armnnDeserializer/test/DeserializeReduceSum.cpp @@ -8,7 +8,6 @@ #include "../Deserializer.hpp" #include -#include BOOST_AUTO_TEST_SUITE(Deserializer) diff --git a/src/armnnQuantizer/ArmNNQuantizerMain.cpp b/src/armnnQuantizer/ArmNNQuantizerMain.cpp index 219363edbb..49652efe25 100644 --- a/src/armnnQuantizer/ArmNNQuantizerMain.cpp +++ b/src/armnnQuantizer/ArmNNQuantizerMain.cpp @@ -61,8 +61,8 @@ int main(int argc, char* argv[]) if (!dataSet.IsEmpty()) { // Get the Input Tensor Infos - armnnQuantizer::InputLayerVisitor inputLayerVisitor; - network->Accept(inputLayerVisitor); + armnnQuantizer::InputLayerStrategy inputLayerStrategy; + network->ExecuteStrategy(inputLayerStrategy); for (armnnQuantizer::QuantizationInput quantizationInput : dataSet) { @@ -72,7 +72,7 @@ int main(int argc, char* argv[]) unsigned int count = 0; for (armnn::LayerBindingId layerBindingId : quantizationInput.GetLayerBindingIds()) { - armnn::TensorInfo tensorInfo = inputLayerVisitor.GetTensorInfo(layerBindingId); + armnn::TensorInfo tensorInfo = inputLayerStrategy.GetTensorInfo(layerBindingId); inputData[count] = quantizationInput.GetDataForEntry(layerBindingId); armnn::ConstTensor inputTensor(tensorInfo, inputData[count].data()); inputTensors.push_back(std::make_pair(layerBindingId, inputTensor)); diff --git a/src/armnnQuantizer/QuantizationDataSet.cpp b/src/armnnQuantizer/QuantizationDataSet.cpp index acd301a470..99fc021a51 100644 --- a/src/armnnQuantizer/QuantizationDataSet.cpp +++ b/src/armnnQuantizer/QuantizationDataSet.cpp @@ -47,6 +47,36 @@ QuantizationDataSet::~QuantizationDataSet() { } + +/// Strategy class implementation to gather the TensorInfo for each LayerBindingId, used to create the ConstTensors for Refine.
+ +void InputLayerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer, + const armnn::BaseDescriptor& descriptor, + const std::vector<armnn::ConstTensor>& constants, + const char* name, + const armnn::LayerBindingId id) +{ + armnn::IgnoreUnused(name, descriptor, constants); + + m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo()); +} + + + + +armnn::TensorInfo InputLayerStrategy::GetTensorInfo(armnn::LayerBindingId layerBindingId) +{ + auto iterator = m_TensorInfos.find(layerBindingId); + if (iterator != m_TensorInfos.end()) + { + return m_TensorInfos.at(layerBindingId); + } + else + { + throw armnn::Exception("Could not retrieve tensor info for binding ID " + std::to_string(layerBindingId)); + } +} + void InputLayerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, armnn::LayerBindingId id, const char* name) diff --git a/src/armnnQuantizer/QuantizationDataSet.hpp b/src/armnnQuantizer/QuantizationDataSet.hpp index 3a97630ccf..47b893a7f7 100644 --- a/src/armnnQuantizer/QuantizationDataSet.hpp +++ b/src/armnnQuantizer/QuantizationDataSet.hpp @@ -42,6 +42,22 @@ private: std::string m_CsvFilePath; }; +/// Strategy class implementation to gather the TensorInfo for each LayerBindingId, used to create the ConstTensors for Refine. +class InputLayerStrategy : public armnn::IStrategy +{ +public: + virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const armnn::BaseDescriptor& descriptor, + const std::vector<armnn::ConstTensor>& constants, + const char* name, + const armnn::LayerBindingId id = 0) override; + + armnn::TensorInfo GetTensorInfo(armnn::LayerBindingId); +private: + std::map<armnn::LayerBindingId, armnn::TensorInfo> m_TensorInfos; +}; + + /// Visitor class implementation to gather the TensorInfo for LayerBindingID for creation of ConstTensor for Refine. class InputLayerVisitor : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy> { diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 28afac7b62..bcdaa087fb 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: MIT // #include "Serializer.hpp" +#include "SerializerUtils.hpp" #include #include @@ -10,9 +11,9 @@ #include #include +#include #include -#include "SerializerUtils.hpp" using namespace armnn; namespace fb = flatbuffers; @@ -95,7 +96,7 @@ serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFun } } -uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid) +uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid) { if (m_guidMap.empty()) { @@ -112,7 +113,7 @@ uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid) } // Build FlatBuffer for Input Layer -void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name) +void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name) { IgnoreUnused(name); @@ -134,7 +135,8 @@ void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, L } // Build FlatBuffer for Output Layer -void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name) +void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer, + LayerBindingId id, const char* name) { IgnoreUnused(name); @@ -154,7 +156,7 @@ void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer); } -void
SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs); @@ -164,9 +166,9 @@ void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, con } // Build FlatBuffer for Activation Layer -void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* layer, - const armnn::ActivationDescriptor& descriptor, - const char* name) +void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer, + const armnn::ActivationDescriptor& descriptor, + const char* name) { IgnoreUnused(name); @@ -189,7 +191,7 @@ void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* lay } // Build FlatBuffer for Addition Layer -void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -204,9 +206,9 @@ void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer } // Build FlatBuffer for ArgMinMax Layer -void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *layer, - const armnn::ArgMinMaxDescriptor& descriptor, - const char *name) +void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer, + const armnn::ArgMinMaxDescriptor& descriptor, + const char *name) { IgnoreUnused(name); @@ -227,9 +229,9 @@ void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *laye } // Build FlatBuffer for BatchToSpaceNd Layer -void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer, - const armnn::BatchToSpaceNdDescriptor& descriptor, - const char* name) +void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer, + const armnn::BatchToSpaceNdDescriptor& descriptor, + const char* name) { IgnoreUnused(name); @@ -257,16 +259,19 @@ void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer); } -void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer, - const armnn::BatchNormalizationDescriptor& batchNormDescriptor, - const armnn::ConstTensor& mean, - const armnn::ConstTensor& variance, - const armnn::ConstTensor& beta, - const armnn::ConstTensor& gamma, - const char* name) +void SerializerStrategy::SerializeBatchNormalizationLayer( + const armnn::IConnectableLayer* layer, + const armnn::BatchNormalizationDescriptor& batchNormDescriptor, + const std::vector& constants, + const char* name) { IgnoreUnused(name); + const armnn::ConstTensor& mean = constants[0]; + const armnn::ConstTensor& variance = constants[1]; + const armnn::ConstTensor& beta = constants[2]; + const armnn::ConstTensor& gamma = constants[3]; + auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization); auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor( m_flatBufferBuilder, @@ -288,7 +293,7 @@ void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLa CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer); } -void SerializerVisitor::VisitComparisonLayer(const 
armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer, const armnn::ComparisonDescriptor& descriptor, const char* name) { @@ -304,12 +309,14 @@ void SerializerVisitor::VisitComparisonLayer(const armnn::IConnectableLayer* lay } // Build FlatBuffer for Constant Layer -void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer, - const armnn::ConstTensor& input, - const char* name) +void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer, + const std::vector& constants, + const char* name) { IgnoreUnused(name); + armnn::ConstTensor input = constants[0]; + // Create FlatBuffer BaseLayer auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant); @@ -325,14 +332,15 @@ void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer } // Build FlatBuffer for Convolution2dLayer -void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* layer, - const armnn::Convolution2dDescriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* name) +void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer, + const armnn::Convolution2dDescriptor& descriptor, + const std::vector& constants, + const char* name) { IgnoreUnused(name); + const armnn::ConstTensor weights = constants[0]; + // Create FlatBuffer BaseLayer auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d); @@ -350,9 +358,10 @@ void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights); flatbuffers::Offset flatBufferBiasesConstTensorInfo; - if (biases.has_value()) + if (constants.size() > 1) { - flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases.value()); + const armnn::ConstTensor biases = constants[1]; + flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases); } // Create the FlatBuffer Convolution2dLayer @@ -366,7 +375,7 @@ void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer); } -void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer, const armnn::DepthToSpaceDescriptor& descriptor, const char* name) { @@ -382,14 +391,15 @@ void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* l CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer); } -void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer, - const armnn::DepthwiseConvolution2dDescriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* name) +void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer, + const armnn::DepthwiseConvolution2dDescriptor& descriptor, + const std::vector& constants, + const char* name) { IgnoreUnused(name); + const armnn::ConstTensor& weights = constants[0]; + auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d); auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder, descriptor.m_PadLeft, @@ -405,9 +415,11 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const 
armnn::IConnectab flatbuffers::Offset fbWeightsConstTensorInfo = CreateConstTensorInfo(weights); flatbuffers::Offset fbBiasesConstTensorInfo; - if (biases.has_value()) + + if (constants.size() > 1) { - fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value()); + const armnn::ConstTensor& biases = constants[1]; + fbBiasesConstTensorInfo = CreateConstTensorInfo(biases); } auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder, @@ -419,7 +431,7 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectab CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer); } -void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -430,13 +442,15 @@ void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* lay CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer); } -void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer, - const armnn::DetectionPostProcessDescriptor& descriptor, - const armnn::ConstTensor& anchors, - const char* name) +void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer, + const armnn::DetectionPostProcessDescriptor& descriptor, + const std::vector& constants, + const char* name) { IgnoreUnused(name); + const armnn::ConstTensor& anchors = constants[0]; + auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess); auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder, descriptor.m_MaxDetections, @@ -461,7 +475,7 @@ void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectable CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer); } -void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -471,7 +485,7 @@ void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer); } -void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer, const armnn::ElementwiseUnaryDescriptor& descriptor, const char* name) { @@ -486,7 +500,7 @@ void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLaye CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer); } -void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -496,7 +510,7 @@ void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, c CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer); } -void SerializerVisitor::VisitFillLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer, const armnn::FillDescriptor& fillDescriptor, const char* name) { @@ -511,7 +525,7 @@ void SerializerVisitor::VisitFillLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbFillLayer.o, 
serializer::Layer::Layer_FillLayer); } -void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name) +void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name) { IgnoreUnused(name); @@ -521,14 +535,7 @@ void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, c CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer); } -void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, - const char* name) -{ - armnn::GatherDescriptor gatherDescriptor{}; - VisitGatherLayer(layer, gatherDescriptor, name); -} - -void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer, const armnn::GatherDescriptor& gatherDescriptor, const char* name) { @@ -542,7 +549,8 @@ void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer); } -void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name) + +void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -552,7 +560,7 @@ void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer); } -void SerializerVisitor::VisitInstanceNormalizationLayer( +void SerializerStrategy::SerializeInstanceNormalizationLayer( const armnn::IConnectableLayer* layer, const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor, const char* name) @@ -572,7 +580,7 @@ void SerializerVisitor::VisitInstanceNormalizationLayer( CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer); } -void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer, const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor, const char* name) { @@ -593,7 +601,7 @@ void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer); } -void SerializerVisitor::VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer, const armnn::LogicalBinaryDescriptor& descriptor, const char* name) { @@ -608,7 +616,7 @@ void SerializerVisitor::VisitLogicalBinaryLayer(const armnn::IConnectableLayer* CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer); } -void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer, const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor, const char* name) { @@ -632,10 +640,10 @@ void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* lay CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer); } -void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer, - const armnn::LstmDescriptor& descriptor, - const armnn::LstmInputParams& params, - const char* name) +void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer, + const armnn::LstmDescriptor& descriptor, + const std::vector& constants, + const char* 
name) { IgnoreUnused(name); @@ -651,16 +659,21 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer, descriptor.m_ProjectionEnabled, descriptor.m_LayerNormEnabled); - // Get mandatory input parameters - auto inputToForgetWeights = CreateConstTensorInfo(*params.m_InputToForgetWeights); - auto inputToCellWeights = CreateConstTensorInfo(*params.m_InputToCellWeights); - auto inputToOutputWeights = CreateConstTensorInfo(*params.m_InputToOutputWeights); - auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights); - auto recurrentToCellWeights = CreateConstTensorInfo(*params.m_RecurrentToCellWeights); - auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights); - auto forgetGateBias = CreateConstTensorInfo(*params.m_ForgetGateBias); - auto cellBias = CreateConstTensorInfo(*params.m_CellBias); - auto outputGateBias = CreateConstTensorInfo(*params.m_OutputGateBias); + // Index for constants vector + std::size_t i = 0; + + // Get mandatory/basic input parameters + auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights + auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights + auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights + auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights + auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights + auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights + auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias + auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias + auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias + + //Define optional parameters, these will be set depending on configuration in Lstm descriptor flatbuffers::Offset inputToInputWeights; @@ -678,33 +691,36 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer, if (!descriptor.m_CifgEnabled) { - inputToInputWeights = CreateConstTensorInfo(*params.m_InputToInputWeights); - recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights); - cellToInputWeights = CreateConstTensorInfo(*params.m_CellToInputWeights); - inputGateBias = CreateConstTensorInfo(*params.m_InputGateBias); + inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights + recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights + inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias } - if (descriptor.m_ProjectionEnabled) + if (descriptor.m_PeepholeEnabled) { - projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights); - projectionBias = CreateConstTensorInfo(*params.m_ProjectionBias); + if (!descriptor.m_CifgEnabled) + { + cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights + } + cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights + cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights } - if (descriptor.m_PeepholeEnabled) + if (descriptor.m_ProjectionEnabled) { - cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights); - cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights); + projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights + projectionBias = 
CreateConstTensorInfo(constants[i++]); //ProjectionBias } if (descriptor.m_LayerNormEnabled) { if (!descriptor.m_CifgEnabled) { - inputLayerNormWeights = CreateConstTensorInfo((*params.m_InputLayerNormWeights)); + inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights } - forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights); - cellLayerNormWeights = CreateConstTensorInfo(*params.m_CellLayerNormWeights); - outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights); + forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights + cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights + outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights } auto fbLstmParams = serializer::CreateLstmInputParams( @@ -740,7 +756,7 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer); } -void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -750,7 +766,7 @@ void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer); } -void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer, const armnn::MeanDescriptor& descriptor, const char* name) { @@ -768,7 +784,7 @@ void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer); } -void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -778,7 +794,7 @@ void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer); } -void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -788,14 +804,14 @@ void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, c CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer); } -void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer, const armnn::MergerDescriptor& mergerDescriptor, const char* name) { - VisitConcatLayer(layer, mergerDescriptor, name); + SerializeConcatLayer(layer, mergerDescriptor, name); } -void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer, const armnn::ConcatDescriptor& concatDescriptor, const char* name) { @@ -830,7 +846,7 @@ void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer); } -void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name) +void 
SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -841,7 +857,7 @@ void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer); } -void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer, const armnn::PadDescriptor& padDescriptor, const char* name) { @@ -867,7 +883,7 @@ void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer); } -void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer, const armnn::PermuteDescriptor& permuteDescriptor, const char* name) { @@ -895,7 +911,7 @@ void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer, } // Build FlatBuffer for Rank Layer -void SerializerVisitor::VisitRankLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -905,9 +921,9 @@ void SerializerVisitor::VisitRankLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer); } -void SerializerVisitor::VisitReduceLayer(const armnn::IConnectableLayer* layer, - const armnn::ReduceDescriptor& reduceDescriptor, - const char*) +void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer, + const armnn::ReduceDescriptor& reduceDescriptor, + const char*) { auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce); auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder, @@ -922,7 +938,7 @@ void SerializerVisitor::VisitReduceLayer(const armnn::IConnectableLayer* layer, } // Build FlatBuffer for Reshape Layer -void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer, const armnn::ReshapeDescriptor& reshapeDescriptor, const char* name) { @@ -948,7 +964,7 @@ void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer); } -void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer, const armnn::ResizeBilinearDescriptor& resizeDescriptor, const char* name) { @@ -971,7 +987,7 @@ void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer); } -void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer, const armnn::ResizeDescriptor& resizeDescriptor, const char* name) { @@ -995,7 +1011,7 @@ void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer); } -void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name) { 
IgnoreUnused(name); @@ -1005,7 +1021,7 @@ void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, c CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer); } -void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer, const armnn::SliceDescriptor& sliceDescriptor, const char* name) { @@ -1022,7 +1038,7 @@ void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer, } // Build FlatBuffer for Softmax Layer -void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer, const armnn::SoftmaxDescriptor& softmaxDescriptor, const char* name) { @@ -1044,7 +1060,7 @@ void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer); } -void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer, const armnn::Pooling2dDescriptor& pooling2dDescriptor, const char* name) { @@ -1073,7 +1089,7 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer); } -void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -1088,7 +1104,7 @@ void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer); } -void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name) +void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name) { IgnoreUnused(name); @@ -1099,14 +1115,15 @@ void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer } // Build FlatBuffer for FullyConnected Layer -void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer, - const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* name) +void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer, + const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor, + const std::vector& constants, + const char* name) { IgnoreUnused(name); + const armnn::ConstTensor& weights = constants.at(0); + // Create FlatBuffer BaseLayer auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected); @@ -1123,7 +1140,8 @@ void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer* flatbuffers::Offset flatBufferBiases; if (fullyConnectedDescriptor.m_BiasEnabled) { - flatBufferBiases = CreateConstTensorInfo(biases.value()); + armnn::ConstTensor biases = constants.at(1); + flatBufferBiases = CreateConstTensorInfo(biases); } // Create FlatBuffer FullyConnectedLayer @@ -1138,7 +1156,7 @@ void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer* } // Build FlatBuffer for SpaceToBatchNd Layer -void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeSpaceToBatchNdLayer(const 
armnn::IConnectableLayer* layer, const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, const char* name) { @@ -1169,7 +1187,7 @@ void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* } // Build FlatBuffer for SpaceToDepthLayer -void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer, const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor, const char* name) { @@ -1189,7 +1207,7 @@ void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* l } // Build FlatBuffer for Splitter Layer -void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer, const armnn::ViewsDescriptor& viewsDescriptor, const char* name) { @@ -1255,7 +1273,7 @@ void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer); } -void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer, const armnn::NormalizationDescriptor& descriptor, const char* name) { @@ -1280,7 +1298,7 @@ void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer); } -void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer, const armnn::StackDescriptor& stackDescriptor, const char* name) { @@ -1303,7 +1321,7 @@ void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer); } -void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer, +void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer, const armnn::StandInDescriptor& standInDescriptor, const char *name) { @@ -1319,7 +1337,7 @@ void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer, CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer); } -void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer, const armnn::StridedSliceDescriptor& stridedSliceDescriptor, const char* name) { @@ -1346,7 +1364,7 @@ void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* l CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer); } -void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -1356,7 +1374,7 @@ void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* la CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer); } -void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name) +void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name) { IgnoreUnused(name); @@ -1366,15 +1384,16 @@ void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, 
CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer); } -void SerializerVisitor::VisitTransposeConvolution2dLayer( +void SerializerStrategy::SerializeTransposeConvolution2dLayer( const armnn::IConnectableLayer* layer, const armnn::TransposeConvolution2dDescriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, + const std::vector& constants, const char* name) { IgnoreUnused(name); + const armnn::ConstTensor& weights = constants.at(0); + auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d); auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder, descriptor.m_PadLeft, @@ -1389,9 +1408,10 @@ void SerializerVisitor::VisitTransposeConvolution2dLayer( // weights & biases auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights); flatbuffers::Offset fbBiasesConstTensorInfo; - if (biases.has_value()) + if (constants.size() > 1) { - fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value()); + const armnn::ConstTensor& biases = constants.at(1); + fbBiasesConstTensorInfo = CreateConstTensorInfo(biases); } auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder, @@ -1403,7 +1423,7 @@ void SerializerVisitor::VisitTransposeConvolution2dLayer( CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer); } -void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* layer, +void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer, const armnn::TransposeDescriptor& descriptor, const char* name) { @@ -1430,10 +1450,10 @@ void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* laye CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer); } -void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer, - const armnn::QLstmDescriptor& descriptor, - const armnn::LstmInputParams& params, - const char* name) +void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer, + const armnn::QLstmDescriptor& descriptor, + const std::vector& constants, + const char* name) { IgnoreUnused(name); @@ -1455,16 +1475,19 @@ void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer, descriptor.m_HiddenStateScale ); + // Index for constants vector + std::size_t i = 0; + // Mandatory params - auto inputToForgetWeights = CreateConstTensorInfo(*params.m_InputToForgetWeights); - auto inputToCellWeights = CreateConstTensorInfo(*params.m_InputToCellWeights); - auto inputToOutputWeights = CreateConstTensorInfo(*params.m_InputToOutputWeights); - auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights); - auto recurrentToCellWeights = CreateConstTensorInfo(*params.m_RecurrentToCellWeights); - auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights); - auto forgetGateBias = CreateConstTensorInfo(*params.m_ForgetGateBias); - auto cellBias = CreateConstTensorInfo(*params.m_CellBias); - auto outputGateBias = CreateConstTensorInfo(*params.m_OutputGateBias); + auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights + auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights + auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights + auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights + auto recurrentToCellWeights = 
CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights + auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights + auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias + auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias + auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias // CIFG flatbuffers::Offset inputToInputWeights; @@ -1473,19 +1496,9 @@ void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer, if (!descriptor.m_CifgEnabled) { - inputToInputWeights = CreateConstTensorInfo(*params.m_InputToInputWeights); - recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights); - inputGateBias = CreateConstTensorInfo(*params.m_InputGateBias); - } - - // Projectiom - flatbuffers::Offset projectionWeights; - flatbuffers::Offset projectionBias; - - if (descriptor.m_ProjectionEnabled) - { - projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights); - projectionBias = CreateConstTensorInfo(*params.m_ProjectionBias); + inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights + recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights + inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias } // Peephole @@ -1497,11 +1510,20 @@ void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer, { if (!descriptor.m_CifgEnabled) { - cellToInputWeights = CreateConstTensorInfo(*params.m_CellToInputWeights); + cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights } + cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights + cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights + } - cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights); - cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights); + // Projection + flatbuffers::Offset projectionWeights; + flatbuffers::Offset projectionBias; + + if (descriptor.m_ProjectionEnabled) + { + projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights + projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias } // Layer norm @@ -1514,12 +1536,11 @@ void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer, { if (!descriptor.m_CifgEnabled) { - inputLayerNormWeights = CreateConstTensorInfo((*params.m_InputLayerNormWeights)); + inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights } - - forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights); - cellLayerNormWeights = CreateConstTensorInfo(*params.m_CellLayerNormWeights); - outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights); + forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights + cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights + outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights } auto fbQLstmParams = serializer::CreateQLstmInputParams( @@ -1555,29 +1576,32 @@ void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer, CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer); } -void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer, - const armnn::QuantizedLstmInputParams& params, - const char* name) +void 
SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer, + const std::vector& constants, + const char* name) { IgnoreUnused(name); auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm); + // index for constants vector + size_t i = 0; + // Get input parameters - auto inputToInputWeights = CreateConstTensorInfo(params.GetInputToInputWeights()); - auto inputToForgetWeights = CreateConstTensorInfo(params.GetInputToForgetWeights()); - auto inputToCellWeights = CreateConstTensorInfo(params.GetInputToCellWeights()); - auto inputToOutputWeights = CreateConstTensorInfo(params.GetInputToOutputWeights()); + auto inputToInputWeights = CreateConstTensorInfo(constants[i++]); + auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); + auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); + auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); - auto recurrentToInputWeights = CreateConstTensorInfo(params.GetRecurrentToInputWeights()); - auto recurrentToForgetWeights = CreateConstTensorInfo(params.GetRecurrentToForgetWeights()); - auto recurrentToCellWeights = CreateConstTensorInfo(params.GetRecurrentToCellWeights()); - auto recurrentToOutputWeights = CreateConstTensorInfo(params.GetRecurrentToOutputWeights()); + auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); + auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); + auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); + auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); - auto inputGateBias = CreateConstTensorInfo(params.GetInputGateBias()); - auto forgetGateBias = CreateConstTensorInfo(params.GetForgetGateBias()); - auto cellBias = CreateConstTensorInfo(params.GetCellBias()); - auto outputGateBias = CreateConstTensorInfo(params.GetOutputGateBias()); + auto inputGateBias = CreateConstTensorInfo(constants[i++]); + auto forgetGateBias = CreateConstTensorInfo(constants[i++]); + auto cellBias = CreateConstTensorInfo(constants[i++]); + auto outputGateBias = CreateConstTensorInfo(constants[i++]); auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams( m_flatBufferBuilder, @@ -1602,7 +1626,7 @@ void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer* CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer); } -fb::Offset SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer, +fb::Offset SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer, const serializer::LayerType layerType) { @@ -1619,7 +1643,7 @@ fb::Offset SerializerVisitor::CreateLayerBase(const IConn m_flatBufferBuilder.CreateVector(outputSlots)); } -void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset& layer, const serializer::Layer serializerLayer) +void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset& layer, const serializer::Layer serializerLayer) { auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer); @@ -1627,7 +1651,7 @@ void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset& layer, c } template -flatbuffers::Offset> SerializerVisitor::CreateDataVector(const void* memory, unsigned int size) +flatbuffers::Offset> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size) { const T* buffer = reinterpret_cast(memory); std::vector vector(buffer, buffer + (size / sizeof(T))); @@ -1635,7 +1659,7 @@ flatbuffers::Offset> 
SerializerVisitor::CreateDataVector(
     return fbVector;
 }

-flatbuffers::Offset<serializer::TensorInfo> SerializerVisitor::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
+flatbuffers::Offset<serializer::TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
 {
     // Get the dimensions
     std::vector<unsigned int> shape;
@@ -1674,7 +1698,7 @@ flatbuffers::Offset<serializer::TensorInfo> SerializerVisitor::CreateTensorInfo(const armnn
 }

 flatbuffers::Offset<serializer::ConstTensor>
-    SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
+    SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
 {
     armnn::TensorInfo tensorInfo = constTensor.GetInfo();
@@ -1724,7 +1748,7 @@ flatbuffers::Offset<serializer::ConstTensor>
     return flatBufferConstTensor;
 }

-flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerVisitor::GetVersionTable()
+flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
 {
     flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
         serializer::CreateFeatureCompatibilityVersions(
@@ -1735,7 +1759,7 @@ flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerVis
 }

 std::vector<flatbuffers::Offset<serializer::InputSlot>>
-    SerializerVisitor::CreateInputSlots(const armnn::IConnectableLayer* layer)
+    SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
 {
     std::vector<flatbuffers::Offset<serializer::InputSlot>> inputSlots;
@@ -1757,7 +1781,7 @@ std::vector<flatbuffers::Offset<serializer::InputSlot>>
 }

 std::vector<flatbuffers::Offset<serializer::OutputSlot>>
-    SerializerVisitor::CreateOutputSlots(const armnn::IConnectableLayer* layer)
+    SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
 {
     std::vector<flatbuffers::Offset<serializer::OutputSlot>> outputSlots;
@@ -1775,32 +1799,421 @@ std::vector<flatbuffers::Offset<serializer::OutputSlot>>
     return outputSlots;
 }

+void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                         const BaseDescriptor& descriptor,
+                                         const std::vector<armnn::ConstTensor>& constants,
+                                         const char* name,
+                                         const armnn::LayerBindingId id)
+{
+    IgnoreUnused(constants);
+
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Activation :
+        {
+            const armnn::ActivationDescriptor& layerDescriptor =
+                    static_cast<const armnn::ActivationDescriptor&>(descriptor);
+            SerializeActivationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Addition :
+        {
+            SerializeAdditionLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::ArgMinMax :
+        {
+            const armnn::ArgMinMaxDescriptor& layerDescriptor =
+                    static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
+            SerializeArgMinMaxLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::BatchNormalization :
+        {
+            const armnn::BatchNormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
+            SerializeBatchNormalizationLayer(layer,
+                                             layerDescriptor,
+                                             constants,
+                                             name);
+            break;
+        }
+        case armnn::LayerType::BatchToSpaceNd :
+        {
+            const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
+                    static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
+            SerializeBatchToSpaceNdLayer(layer,
+                                         layerDescriptor,
+                                         name);
+            break;
+        }
+        case armnn::LayerType::Comparison :
+        {
+            const armnn::ComparisonDescriptor& layerDescriptor =
+                    static_cast<const armnn::ComparisonDescriptor&>(descriptor);
+            SerializeComparisonLayer(layer,
+                                     layerDescriptor,
+                                     name);
+            break;
+        }
+        case armnn::LayerType::Concat :
+        {
+            const armnn::ConcatDescriptor& layerDescriptor =
+                    static_cast<const armnn::ConcatDescriptor&>(descriptor);
+            SerializeConcatLayer(layer,
+                                 layerDescriptor,
+                                 name);
+            break;
+        }
+        case armnn::LayerType::Constant :
+        {
+            SerializeConstantLayer(layer,
+                                   constants,
+                                   name);
+            break;
+        }
+        case armnn::LayerType::Convolution2d :
+        {
+            const armnn::Convolution2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
+            SerializeConvolution2dLayer(layer,
+                                        layerDescriptor,
+                                        constants,
+                                        name);
+            break;
+        }
+        case armnn::LayerType::DepthToSpace :
+        {
+            const armnn::DepthToSpaceDescriptor& layerDescriptor =
+                    static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
+            SerializeDepthToSpaceLayer(layer,
+                                       layerDescriptor,
+                                       name);
+            break;
+        }
+        case armnn::LayerType::DepthwiseConvolution2d :
+        {
+            const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
+            SerializeDepthwiseConvolution2dLayer(layer,
+                                                 layerDescriptor,
+                                                 constants,
+                                                 name);
+            break;
+        }
+        case armnn::LayerType::Dequantize :
+        {
+            SerializeDequantizeLayer(layer,
+                                     name);
+            break;
+        }
+        case armnn::LayerType::DetectionPostProcess :
+        {
+            const armnn::DetectionPostProcessDescriptor& layerDescriptor =
+                    static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
+            SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::Division :
+        {
+            SerializeDivisionLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::ElementwiseUnary :
+        {
+            const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
+                    static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
+            SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Fill :
+        {
+            const armnn::FillDescriptor& layerDescriptor =
+                    static_cast<const armnn::FillDescriptor&>(descriptor);
+            SerializeFillLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Floor :
+        {
+            SerializeFloorLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::FullyConnected :
+        {
+            const armnn::FullyConnectedDescriptor& layerDescriptor =
+                    static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
+            SerializeFullyConnectedLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::Gather :
+        {
+            const armnn::GatherDescriptor& layerDescriptor =
+                    static_cast<const armnn::GatherDescriptor&>(descriptor);
+            SerializeGatherLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Input:
+        {
+            SerializeInputLayer(layer, id, name);
+            break;
+        }
+        case armnn::LayerType::InstanceNormalization :
+        {
+            const armnn::InstanceNormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
+            SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::L2Normalization :
+        {
+            const armnn::L2NormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
+            SerializeL2NormalizationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::LogicalBinary :
+        {
+            const armnn::LogicalBinaryDescriptor& layerDescriptor =
+                    static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
+            SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::LogSoftmax :
+        {
+            const armnn::LogSoftmaxDescriptor& layerDescriptor =
+                    static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
+            SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Lstm :
+        {
+            const armnn::LstmDescriptor& layerDescriptor =
+                    static_cast<const armnn::LstmDescriptor&>(descriptor);
+            SerializeLstmLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::QLstm :
+        {
+            const armnn::QLstmDescriptor& layerDescriptor =
+                    static_cast<const armnn::QLstmDescriptor&>(descriptor);
+            SerializeQLstmLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        case armnn::LayerType::Maximum :
+        {
+            SerializeMaximumLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Mean :
+        {
+            const armnn::MeanDescriptor& layerDescriptor =
+                    static_cast<const armnn::MeanDescriptor&>(descriptor);
+            SerializeMeanLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Merge :
+        {
+            SerializeMergeLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Minimum :
+        {
+            SerializeMinimumLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Multiplication :
+        {
+            SerializeMultiplicationLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Normalization :
+        {
+            const armnn::NormalizationDescriptor& layerDescriptor =
+                    static_cast<const armnn::NormalizationDescriptor&>(descriptor);
+            SerializeNormalizationLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Output:
+        {
+            SerializeOutputLayer(layer, id, name);
+            break;
+        }
+        case armnn::LayerType::Pad :
+        {
+            const armnn::PadDescriptor& layerDescriptor =
+                    static_cast<const armnn::PadDescriptor&>(descriptor);
+            SerializePadLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Permute :
+        {
+            const armnn::PermuteDescriptor& layerDescriptor =
+                    static_cast<const armnn::PermuteDescriptor&>(descriptor);
+            SerializePermuteLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Pooling2d :
+        {
+            const armnn::Pooling2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
+            SerializePooling2dLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Prelu :
+        {
+            SerializePreluLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Quantize :
+        {
+            SerializeQuantizeLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::QuantizedLstm:
+            SerializeQuantizedLstmLayer(layer, constants, name);
+            break;
+        case armnn::LayerType::Reshape:
+        {
+            const armnn::ReshapeDescriptor& layerDescriptor =
+                    static_cast<const armnn::ReshapeDescriptor&>(descriptor);
+            SerializeReshapeLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Rank:
+        {
+            SerializeRankLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Reduce:
+        {
+            const armnn::ReduceDescriptor& layerDescriptor =
+                    static_cast<const armnn::ReduceDescriptor&>(descriptor);
+            SerializeReduceLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Resize:
+        {
+            const armnn::ResizeDescriptor& layerDescriptor =
+                    static_cast<const armnn::ResizeDescriptor&>(descriptor);
+            SerializeResizeLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Slice:
+        {
+            const armnn::SliceDescriptor& layerDescriptor =
+                    static_cast<const armnn::SliceDescriptor&>(descriptor);
+            SerializeSliceLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Softmax:
+        {
+            const armnn::SoftmaxDescriptor& layerDescriptor =
+                    static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
+            SerializeSoftmaxLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::SpaceToBatchNd:
+        {
+            const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
+                    static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
+            SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::SpaceToDepth:
+        {
+            const armnn::SpaceToDepthDescriptor& layerDescriptor =
+                    static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
+            SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Splitter:
+        {
+            const armnn::SplitterDescriptor& layerDescriptor =
+                    static_cast<const armnn::SplitterDescriptor&>(descriptor);
+            SerializeSplitterLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Stack:
+        {
+            const armnn::StackDescriptor& layerDescriptor =
+                    static_cast<const armnn::StackDescriptor&>(descriptor);
+            SerializeStackLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::StandIn:
+        {
+            const armnn::StandInDescriptor& layerDescriptor =
+                    static_cast<const armnn::StandInDescriptor&>(descriptor);
+            SerializeStandInLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::StridedSlice:
+        {
+            const armnn::StridedSliceDescriptor& layerDescriptor =
+                    static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
+            SerializeStridedSliceLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::Subtraction:
+        {
+            SerializeSubtractionLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Switch:
+        {
+            SerializeSwitchLayer(layer, name);
+            break;
+        }
+        case armnn::LayerType::Transpose:
+        {
+            const armnn::TransposeDescriptor& layerDescriptor =
+                    static_cast<const armnn::TransposeDescriptor&>(descriptor);
+            SerializeTransposeLayer(layer, layerDescriptor, name);
+            break;
+        }
+        case armnn::LayerType::TransposeConvolution2d:
+        {
+            const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
+                    static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
+            SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
+            break;
+        }
+        default:
+        {
+            throw InvalidArgumentException(
+                fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
+                            layer->GetName(),
+                            id));
+        }
+    }
+}
+
 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
 {
     // Iterate through to network
-    inNetwork.Accept(m_SerializerVisitor);
-    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
+    inNetwork.ExecuteStrategy(m_SerializerStrategy);
+    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();

     // Create FlatBuffer SerializedGraph
     auto serializedGraph = serializer::CreateSerializedGraph(
-        fbBuilder,
-        fbBuilder.CreateVector(m_SerializerVisitor.GetSerializedLayers()),
-        fbBuilder.CreateVector(m_SerializerVisitor.GetInputIds()),
-        fbBuilder.CreateVector(m_SerializerVisitor.GetOutputIds()),
-        m_SerializerVisitor.GetVersionTable());
+            fbBuilder,
+            fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
+            fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
+            fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
+            m_SerializerStrategy.GetVersionTable());

     // Serialize the graph
     fbBuilder.Finish(serializedGraph);
 }
+
 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
 {
-    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
+    flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();

     auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
     stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
     return !stream.bad();
 }
-
 } // namespace armnnSerializer
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 10971fddc8..7226006cea 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -5,6 +5,7 @@
 #pragma once

 #include
+#include <armnn/IStrategy.hpp>
 #include
 #include

@@ -18,11 +19,17 @@ namespace armnnSerializer
 {

-class SerializerVisitor : public armnn::ILayerVisitor
+class SerializerStrategy : public armnn::IStrategy
 {
 public:
-    SerializerVisitor() : m_layerId(0) {}
-    ~SerializerVisitor() {}
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id) override;
+
+    SerializerStrategy() : m_layerId(0) {}
+    ~SerializerStrategy() {}

     flatbuffers::FlatBufferBuilder& GetFlatBufferBuilder()
     {
@@ -46,309 +53,297 @@ public:

     flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> GetVersionTable();

+private:
+    /// Creates the Input Slots and Output Slots and LayerBase for the layer.
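+    /// The LayerBase stores the layer's name and type, an index mapped from the
+    /// layer's Guid (see GetSerializedId), and the serialized input/output slots.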
+ flatbuffers::Offset CreateLayerBase( + const armnn::IConnectableLayer* layer, + const armnnSerializer::LayerType layerType); - ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") - void VisitAbsLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitActivationLayer(const armnn::IConnectableLayer* layer, - const armnn::ActivationDescriptor& descriptor, - const char* name = nullptr) override; - - void VisitAdditionLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer, - const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor, - const char* name = nullptr) override; - - void VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer, - const armnn::BatchToSpaceNdDescriptor& descriptor, - const char* name = nullptr) override; - - void VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer, - const armnn::BatchNormalizationDescriptor& BatchNormalizationDescriptor, - const armnn::ConstTensor& mean, - const armnn::ConstTensor& variance, - const armnn::ConstTensor& beta, - const armnn::ConstTensor& gamma, - const char* name = nullptr) override; - - void VisitComparisonLayer(const armnn::IConnectableLayer* layer, - const armnn::ComparisonDescriptor& descriptor, - const char* name = nullptr) override; - - void VisitConcatLayer(const armnn::IConnectableLayer* layer, - const armnn::ConcatDescriptor& concatDescriptor, - const char* name = nullptr) override; - - void VisitConstantLayer(const armnn::IConnectableLayer* layer, - const armnn::ConstTensor& input, - const char* = nullptr) override; - - void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer, - const armnn::Convolution2dDescriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* = nullptr) override; - - void VisitDepthToSpaceLayer(const armnn::IConnectableLayer* layer, - const armnn::DepthToSpaceDescriptor& descriptor, - const char* name = nullptr) override; - - void VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer, - const armnn::DepthwiseConvolution2dDescriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* name = nullptr) override; - - void VisitDequantizeLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer, - const armnn::DetectionPostProcessDescriptor& descriptor, - const armnn::ConstTensor& anchors, - const char* name = nullptr) override; - - void VisitDivisionLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; - - void VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer, - const armnn::ElementwiseUnaryDescriptor& descriptor, - const char* name = nullptr) override; + /// Creates the serializer AnyLayer for the layer and adds it to m_serializedLayers. + void CreateAnyLayer(const flatbuffers::Offset& layer, const armnnSerializer::Layer serializerLayer); - ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead") - void VisitEqualLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; + /// Creates the serializer ConstTensor for the armnn ConstTensor. 
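+    /// Note: the tensor data is copied into the FlatBuffer via CreateDataVector,
+    /// so the source ConstTensor does not need to outlive serialization.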
+ flatbuffers::Offset CreateConstTensorInfo( + const armnn::ConstTensor& constTensor); - void VisitFillLayer(const armnn::IConnectableLayer* layer, - const armnn::FillDescriptor& fillDescriptor, - const char* name = nullptr) override; + /// Creates the serializer TensorInfo for the armnn TensorInfo. + flatbuffers::Offset CreateTensorInfo(const armnn::TensorInfo& tensorInfo); - void VisitFloorLayer(const armnn::IConnectableLayer *layer, - const char *name = nullptr) override; + template + flatbuffers::Offset> CreateDataVector(const void* memory, unsigned int size); - void VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer, - const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* name = nullptr) override; + ///Function which maps Guid to an index + uint32_t GetSerializedId(armnn::LayerGuid guid); - ARMNN_DEPRECATED_MSG("Use VisitGatherLayer with descriptor instead") - void VisitGatherLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; + /// Creates the serializer InputSlots for the layer. + std::vector> CreateInputSlots( + const armnn::IConnectableLayer* layer); - void VisitGatherLayer(const armnn::IConnectableLayer* layer, - const armnn::GatherDescriptor& gatherDescriptor, - const char* name = nullptr) override; + /// Creates the serializer OutputSlots for the layer. + std::vector> CreateOutputSlots( + const armnn::IConnectableLayer* layer); - ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead") - void VisitGreaterLayer(const armnn::IConnectableLayer* layer, - const char* name = nullptr) override; + /// FlatBufferBuilder to create our layers' FlatBuffers. + flatbuffers::FlatBufferBuilder m_flatBufferBuilder; - void VisitInputLayer(const armnn::IConnectableLayer* layer, - armnn::LayerBindingId id, - const char* name = nullptr) override; + /// AnyLayers required by the SerializedGraph. + std::vector> m_serializedLayers; - void VisitInstanceNormalizationLayer(const armnn::IConnectableLayer* layer, - const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor, - const char* name = nullptr) override; + /// Vector of the binding ids of all Input Layers required by the SerializedGraph. + std::vector m_inputIds; - void VisitL2NormalizationLayer(const armnn::IConnectableLayer* layer, - const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor, - const char* name = nullptr) override; + /// Vector of the binding ids of all Output Layers required by the SerializedGraph. + std::vector m_outputIds; - void VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer, - const armnn::LogicalBinaryDescriptor& descriptor, - const char* name = nullptr) override; + /// Mapped Guids of all Layers to match our index. + std::unordered_map m_guidMap; - void VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer, - const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor, - const char* name = nullptr) override; + /// layer within our FlatBuffer index. 
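+    /// Note: assigned and incremented by GetSerializedId() as previously unseen
+    /// layer Guids are encountered.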
+    uint32_t m_layerId;
+
+private:
+    ARMNN_DEPRECATED_MSG("Use SerializeElementwiseUnaryLayer instead")
+    void SerializeAbsLayer(const armnn::IConnectableLayer* layer,
+                           const char* name = nullptr);

-    void VisitLstmLayer(const armnn::IConnectableLayer* layer,
-                        const armnn::LstmDescriptor& descriptor,
-                        const armnn::LstmInputParams& params,
-                        const char* name = nullptr) override;
+    void SerializeActivationLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::ActivationDescriptor& descriptor,
+                                  const char* name = nullptr);

-    void VisitMeanLayer(const armnn::IConnectableLayer* layer,
-                        const armnn::MeanDescriptor& descriptor,
-                        const char* name) override;
+    void SerializeAdditionLayer(const armnn::IConnectableLayer* layer,
+                                const char* name = nullptr);

-    void VisitMinimumLayer(const armnn::IConnectableLayer* layer,
-                           const char* name = nullptr) override;
+    void SerializeArgMinMaxLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                 const char* name = nullptr);

-    void VisitMaximumLayer(const armnn::IConnectableLayer* layer,
-                           const char* name = nullptr) override;
+    void SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::BatchToSpaceNdDescriptor& descriptor,
+                                      const char* name = nullptr);

-    void VisitMergeLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
+    void SerializeBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                          const armnn::BatchNormalizationDescriptor& BatchNormalizationDescriptor,
+                                          const std::vector<armnn::ConstTensor>& constants,
+                                          const char* name = nullptr);

-    ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
-    void VisitMergerLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::MergerDescriptor& mergerDescriptor,
-                          const char* name = nullptr) override;
+    void SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::ComparisonDescriptor& descriptor,
+                                  const char* name = nullptr);

-    void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer,
-                                  const char* name = nullptr) override;
+    void SerializeConcatLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::ConcatDescriptor& concatDescriptor,
+                              const char* name = nullptr);

-    void VisitOutputLayer(const armnn::IConnectableLayer* layer,
-                          armnn::LayerBindingId id,
-                          const char* name = nullptr) override;
+    void SerializeConstantLayer(const armnn::IConnectableLayer* layer,
+                                const std::vector<armnn::ConstTensor>& constants,
+                                const char* name = nullptr);

-    void VisitPadLayer(const armnn::IConnectableLayer* layer,
-                       const armnn::PadDescriptor& PadDescriptor,
-                       const char* name = nullptr) override;
+    void SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                     const armnn::Convolution2dDescriptor& descriptor,
+                                     const std::vector<armnn::ConstTensor>& constants,
+                                     const char* name = nullptr);

-    void VisitPermuteLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::PermuteDescriptor& PermuteDescriptor,
-                           const char* name = nullptr) override;
+    void SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::DepthToSpaceDescriptor& descriptor,
+                                    const char* name = nullptr);

-    void VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
-                             const armnn::Pooling2dDescriptor& pooling2dDescriptor,
-                             const char* name = nullptr) override;
+    void SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+                                              const std::vector<armnn::ConstTensor>& constants,
+                                              const char* name = nullptr);

-    void VisitPreluLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
+    void SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
+                                  const char* name = nullptr);

-    void VisitQuantizeLayer(const armnn::IConnectableLayer* layer,
-                            const char* name = nullptr) override;
+    void SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
+                                            const armnn::DetectionPostProcessDescriptor& descriptor,
+                                            const std::vector<armnn::ConstTensor>& constants,
+                                            const char* name = nullptr);

-    void VisitQLstmLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::QLstmDescriptor& descriptor,
-                         const armnn::LstmInputParams& params,
-                         const char* name = nullptr) override;
+    void SerializeDivisionLayer(const armnn::IConnectableLayer* layer,
+                                const char* name = nullptr);

-    void VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::QuantizedLstmInputParams& params,
-                                 const char* name = nullptr) override;
+    void SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
+                                        const armnn::ElementwiseUnaryDescriptor& descriptor,
+                                        const char* name = nullptr);

-    void VisitRankLayer(const armnn::IConnectableLayer* layer,
-                        const char* name = nullptr) override;
+    ARMNN_DEPRECATED_MSG("Use SerializeComparisonLayer instead")
+    void SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name);

-    void VisitReduceLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::ReduceDescriptor& reduceDescriptor,
-                          const char* name = nullptr) override;
+    void SerializeFillLayer(const armnn::IConnectableLayer* layer,
+                            const armnn::FillDescriptor& fillDescriptor,
+                            const char* name = nullptr);

-    void VisitReshapeLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::ReshapeDescriptor& reshapeDescriptor,
-                           const char* name = nullptr) override;
+    void SerializeFloorLayer(const armnn::IConnectableLayer *layer,
+                             const char *name = nullptr);

-    void VisitResizeLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::ResizeDescriptor& resizeDescriptor,
-                          const char* name = nullptr) override;
+    void SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                      const std::vector<armnn::ConstTensor>& constants,
+                                      const char* name = nullptr);

-    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
-    void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
-                                  const char* name = nullptr) override;
+    void SerializeGatherLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::GatherDescriptor& gatherDescriptor,
+                              const char* name = nullptr);

-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
-                         const char* name = nullptr) override;
+    ARMNN_DEPRECATED_MSG("Use SerializeComparisonLayer instead")
+    void SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr);
+
+    void SerializeInputLayer(const armnn::IConnectableLayer* layer,
+                             armnn::LayerBindingId id,
+                             const char* name = nullptr);

-    void VisitSliceLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::SliceDescriptor& sliceDescriptor,
-                         const char* name = nullptr) override;
+    void SerializeInstanceNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                             const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
+                                             const char* name = nullptr);

-    void VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::SoftmaxDescriptor& softmaxDescriptor,
-                           const char* name = nullptr) override;
+    void SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
+                                       const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
+                                       const char* name = nullptr);

-    void VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                  const char* name = nullptr) override;
+    void SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
+                                     const armnn::LogicalBinaryDescriptor& descriptor,
+                                     const char* name = nullptr);

-    void VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                const char* name = nullptr) override;
+    void SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
+                                  const char* name = nullptr);

-    void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::NormalizationDescriptor& normalizationDescriptor,
-                                 const char* name = nullptr) override;
+    void SerializeLstmLayer(const armnn::IConnectableLayer* layer,
+                            const armnn::LstmDescriptor& descriptor,
+                            const std::vector<armnn::ConstTensor>& constants,
+                            const char* name = nullptr);

-    void VisitSplitterLayer(const armnn::IConnectableLayer* layer,
-                            const armnn::ViewsDescriptor& viewsDescriptor,
-                            const char* name = nullptr) override;
+    void SerializeMeanLayer(const armnn::IConnectableLayer* layer,
+                            const armnn::MeanDescriptor& descriptor,
+                            const char* name);

-    void VisitStandInLayer(const armnn::IConnectableLayer* layer,
-                           const armnn::StandInDescriptor& standInDescriptor,
-                           const char* name = nullptr) override;
+    void SerializeMinimumLayer(const armnn::IConnectableLayer* layer,
+                               const char* name = nullptr);

-    void VisitStackLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::StackDescriptor& stackDescriptor,
-                         const char* name = nullptr) override;
+    void SerializeMaximumLayer(const armnn::IConnectableLayer* layer,
+                               const char* name = nullptr);

-    void VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
-                                const char* name = nullptr) override;
+    void SerializeMergeLayer(const armnn::IConnectableLayer* layer,
+                             const char* name = nullptr);

-    void VisitSubtractionLayer(const armnn::IConnectableLayer* layer,
-                               const char* name = nullptr) override;
+    ARMNN_DEPRECATED_MSG("Use SerializeConcatLayer instead")
+    void SerializeMergerLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::MergerDescriptor& mergerDescriptor,
+                              const char* name = nullptr);

-    void VisitSwitchLayer(const armnn::IConnectableLayer* layer,
-                          const char* name = nullptr) override;
+    void SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer,
+                                      const char* name = nullptr);

-    void VisitTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                          const armnn::TransposeConvolution2dDescriptor& descriptor,
-                                          const armnn::ConstTensor& weights,
-                                          const armnn::Optional<armnn::ConstTensor>& biases,
-                                          const char* = nullptr) override;
+    void SerializeOutputLayer(const armnn::IConnectableLayer* layer,
+                              armnn::LayerBindingId id,
+                              const char* name = nullptr);

-    void VisitTransposeLayer(const armnn::IConnectableLayer* layer,
-                             const armnn::TransposeDescriptor& descriptor,
-                             const char* name = nullptr) override;
+    void SerializePadLayer(const armnn::IConnectableLayer* layer,
+                           const armnn::PadDescriptor& PadDescriptor,
+                           const char* name = nullptr);

-private:
+    void SerializePermuteLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::PermuteDescriptor& PermuteDescriptor,
+                               const char* name = nullptr);

-    /// Creates the Input Slots and Output Slots and LayerBase for the layer.
-    flatbuffers::Offset<armnnSerializer::LayerBase> CreateLayerBase(
-            const armnn::IConnectableLayer* layer,
-            const armnnSerializer::LayerType layerType);
+    void SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::Pooling2dDescriptor& pooling2dDescriptor,
+                                 const char* name = nullptr);

-    /// Creates the serializer AnyLayer for the layer and adds it to m_serializedLayers.
-    void CreateAnyLayer(const flatbuffers::Offset<void>& layer, const armnnSerializer::Layer serializerLayer);
+    void SerializePreluLayer(const armnn::IConnectableLayer* layer,
+                             const char* name = nullptr);

-    /// Creates the serializer ConstTensor for the armnn ConstTensor.
-    flatbuffers::Offset<armnnSerializer::ConstTensor> CreateConstTensorInfo(
-            const armnn::ConstTensor& constTensor);
+    void SerializeQuantizeLayer(const armnn::IConnectableLayer* layer,
+                                const char* name = nullptr);

-    /// Creates the serializer TensorInfo for the armnn TensorInfo.
-    flatbuffers::Offset<armnnSerializer::TensorInfo> CreateTensorInfo(const armnn::TensorInfo& tensorInfo);
+    void SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::QLstmDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name = nullptr);

-    template <typename T>
-    flatbuffers::Offset<flatbuffers::Vector<T>> CreateDataVector(const void* memory, unsigned int size);
+    void SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
+                                     const std::vector<armnn::ConstTensor>& constants,
+                                     const char* name = nullptr);

-    ///Function which maps Guid to an index
-    uint32_t GetSerializedId(armnn::LayerGuid guid);
+    void SerializeRankLayer(const armnn::IConnectableLayer* layer,
+                            const char* name = nullptr);

-    /// Creates the serializer InputSlots for the layer.
-    std::vector<flatbuffers::Offset<armnnSerializer::InputSlot>> CreateInputSlots(
-            const armnn::IConnectableLayer* layer);
+    void SerializeReduceLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::ReduceDescriptor& reduceDescriptor,
+                              const char* name = nullptr);

-    /// Creates the serializer OutputSlots for the layer.
-    std::vector<flatbuffers::Offset<armnnSerializer::OutputSlot>> CreateOutputSlots(
-            const armnn::IConnectableLayer* layer);
+    void SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::ReshapeDescriptor& reshapeDescriptor,
+                               const char* name = nullptr);

-    /// FlatBufferBuilder to create our layers' FlatBuffers.
-    flatbuffers::FlatBufferBuilder m_flatBufferBuilder;
+    void SerializeResizeLayer(const armnn::IConnectableLayer* layer,
+                              const armnn::ResizeDescriptor& resizeDescriptor,
+                              const char* name = nullptr);

-    /// AnyLayers required by the SerializedGraph.
-    std::vector<flatbuffers::Offset<armnnSerializer::AnyLayer>> m_serializedLayers;
+    ARMNN_DEPRECATED_MSG("Use SerializeResizeLayer instead")
+    void SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+                                      const char* name = nullptr);

-    /// Vector of the binding ids of all Input Layers required by the SerializedGraph.
-    std::vector<int> m_inputIds;
+    ARMNN_DEPRECATED_MSG("Use SerializeElementwiseUnaryLayer instead")
+    void SerializeRsqrtLayer(const armnn::IConnectableLayer* layer,
+                             const char* name = nullptr);

-    /// Vector of the binding ids of all Output Layers required by the SerializedGraph.
-    std::vector<int> m_outputIds;
+    void SerializeSliceLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::SliceDescriptor& sliceDescriptor,
+                             const char* name = nullptr);

-    /// Mapped Guids of all Layers to match our index.
-    std::unordered_map<armnn::LayerGuid, uint32_t> m_guidMap;
+    void SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::SoftmaxDescriptor& softmaxDescriptor,
+                               const char* name = nullptr);
-    /// layer within our FlatBuffer index.
-    uint32_t m_layerId;
+    void SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
+                                      const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+                                      const char* name = nullptr);
+
+    void SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                    const char* name = nullptr);
+
+    void SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
+                                     const armnn::NormalizationDescriptor& normalizationDescriptor,
+                                     const char* name = nullptr);
+
+    void SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
+                                const armnn::ViewsDescriptor& viewsDescriptor,
+                                const char* name = nullptr);
+
+    void SerializeStandInLayer(const armnn::IConnectableLayer* layer,
+                               const armnn::StandInDescriptor& standInDescriptor,
+                               const char* name = nullptr);
+
+    void SerializeStackLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::StackDescriptor& stackDescriptor,
+                             const char* name = nullptr);
+
+    void SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
+                                    const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
+                                    const char* name = nullptr);
+
+    void SerializeSubtractionLayer(const armnn::IConnectableLayer* layer,
+                                   const char* name = nullptr);
+
+    void SerializeSwitchLayer(const armnn::IConnectableLayer* layer,
+                              const char* name = nullptr);
+
+    void SerializeTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                              const armnn::TransposeConvolution2dDescriptor& descriptor,
+                                              const std::vector<armnn::ConstTensor>& constants,
+                                              const char* = nullptr);
+
+    void SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::TransposeDescriptor& descriptor,
+                                 const char* name = nullptr);
 };
+
+
 class ISerializer::SerializerImpl
 {
 public:
@@ -367,7 +362,7 @@ public:
 
 private:
     /// Visitor to construct serialized network
-    SerializerVisitor m_SerializerVisitor;
+    SerializerStrategy m_SerializerStrategy;
 };
 
 } //namespace armnnSerializer
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index 1645731413..fbe1ae0ad4 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -17,15 +17,20 @@
 BOOST_AUTO_TEST_SUITE(SerializerTests)
 
-class VerifyActivationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+class VerifyActivationName : public armnn::IStrategy
 {
 public:
-    void VisitActivationLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ActivationDescriptor& activationDescriptor,
-                              const char* name) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        IgnoreUnused(layer, activationDescriptor);
-        BOOST_TEST(name == "activation");
+        IgnoreUnused(layer, descriptor, constants, id);
+        if (layer->GetType() == armnn::LayerType::Activation)
+        {
+            BOOST_TEST(name == "activation");
+        }
     }
 };
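+
+// A strategy has a single ExecuteStrategy entry point, so checks key off
+// layer->GetType() instead of one Visit override per layer type. A minimal
+// sketch of the same pattern (hypothetical LayerCounter, not part of this
+// change and not used by these tests) would be:
+//
+//     class LayerCounter : public armnn::IStrategy
+//     {
+//     public:
+//         void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+//                              const armnn::BaseDescriptor&,
+//                              const std::vector<armnn::ConstTensor>&,
+//                              const char*,
+//                              const armnn::LayerBindingId = 0) override
+//         {
+//             ++m_Counts[layer->GetType()]; // needs #include <map>
+//         }
+//
+//         std::map<armnn::LayerType, size_t> m_Counts;
+//     };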
@@ -67,7 +72,7 @@ BOOST_AUTO_TEST_CASE(ActivationSerialization)
     armnn::INetworkPtr deserializedNetwork = parser->CreateNetworkFromBinary(serializerVector);
 
     VerifyActivationName visitor;
-    deserializedNetwork->Accept(visitor);
+    deserializedNetwork->ExecuteStrategy(visitor);
 
     armnn::IRuntime::CreationOptions options; // default options
     armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
diff --git a/src/armnnSerializer/test/ComparisonSerializationTests.cpp b/src/armnnSerializer/test/ComparisonSerializationTests.cpp
new file mode 100644
index 0000000000..3aee9a7bcb
--- /dev/null
+++ b/src/armnnSerializer/test/ComparisonSerializationTests.cpp
@@ -0,0 +1,123 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../Serializer.hpp"
+#include "SerializerTestUtils.hpp"
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+
+BOOST_AUTO_TEST_SUITE(SerializerTests)
+
+struct ComparisonModel
+{
+    ComparisonModel(const std::string& layerName,
+                    const armnn::TensorInfo& inputInfo,
+                    const armnn::TensorInfo& outputInfo,
+                    armnn::ComparisonDescriptor& descriptor)
+        : m_network(armnn::INetwork::Create())
+    {
+        armnn::IConnectableLayer* const inputLayer0 = m_network->AddInputLayer(0);
+        armnn::IConnectableLayer* const inputLayer1 = m_network->AddInputLayer(1);
+        armnn::IConnectableLayer* const equalLayer = m_network->AddComparisonLayer(descriptor, layerName.c_str());
+        armnn::IConnectableLayer* const outputLayer = m_network->AddOutputLayer(0);
+
+        inputLayer0->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
+        inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(1));
+        equalLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+        inputLayer0->GetOutputSlot(0).SetTensorInfo(inputInfo);
+        inputLayer1->GetOutputSlot(0).SetTensorInfo(inputInfo);
+        equalLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    }
+
+    armnn::INetworkPtr m_network;
+};
+
+class ComparisonLayerVerifier : public LayerVerifierBase
+{
+public:
+    ComparisonLayerVerifier(const std::string& layerName,
+                            const std::vector<armnn::TensorInfo>& inputInfos,
+                            const std::vector<armnn::TensorInfo>& outputInfos,
+                            const armnn::ComparisonDescriptor& descriptor)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos)
+        , m_Descriptor (descriptor) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Comparison:
+            {
+                VerifyNameAndConnections(layer, name);
+                const armnn::ComparisonDescriptor& layerDescriptor =
+                        static_cast<const armnn::ComparisonDescriptor&>(descriptor);
+                BOOST_CHECK(layerDescriptor.m_Operation == m_Descriptor.m_Operation);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in Comparison test model");
+            }
+        }
+    }
+
+private:
+    armnn::ComparisonDescriptor m_Descriptor;
+};
+
+BOOST_AUTO_TEST_CASE(SerializeEqual)
+{
+    const std::string layerName("equal");
+
+    const armnn::TensorShape shape{2, 1, 2, 4};
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+
+    armnn::ComparisonDescriptor descriptor (armnn::ComparisonOperation::Equal);
+
+    ComparisonModel model(layerName, inputInfo, outputInfo, descriptor);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*model.m_network));
+    BOOST_CHECK(deserializedNetwork);
+
+    ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeGreater)
+{
+    const std::string layerName("greater");
+
+    const armnn::TensorShape shape{2, 1, 2, 4};
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+
+    armnn::ComparisonDescriptor descriptor (armnn::ComparisonOperation::Greater);
+
+    ComparisonModel model(layerName, inputInfo, outputInfo, descriptor);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*model.m_network));
+    BOOST_CHECK(deserializedNetwork);
+
+    ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
new file mode 100644
index 0000000000..4705c0bd28
--- /dev/null
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -0,0 +1,2199 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../Serializer.hpp"
+#include "SerializerTestUtils.hpp"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+
+BOOST_AUTO_TEST_SUITE(SerializerTests)
+
+template<typename Descriptor>
+armnn::LstmInputParams ConstantVector2LstmInputParams(const std::vector<armnn::ConstTensor>& constants,
+                                                      Descriptor& descriptor)
+{
+    armnn::LstmInputParams lstmInputParams;
+    size_t i = 0;
+
+    // Inserting basic parameters
+    lstmInputParams.m_InputToForgetWeights = &constants[i++];
+    lstmInputParams.m_InputToCellWeights = &constants[i++];
+    lstmInputParams.m_InputToOutputWeights = &constants[i++];
+    lstmInputParams.m_RecurrentToForgetWeights = &constants[i++];
+    lstmInputParams.m_RecurrentToCellWeights = &constants[i++];
+    lstmInputParams.m_RecurrentToOutputWeights = &constants[i++];
+    lstmInputParams.m_ForgetGateBias = &constants[i++];
+    lstmInputParams.m_CellBias = &constants[i++];
+    lstmInputParams.m_OutputGateBias = &constants[i++];
+    if (!descriptor.m_CifgEnabled)
+    {
+        lstmInputParams.m_InputToInputWeights = &constants[i++];
+        lstmInputParams.m_RecurrentToInputWeights = &constants[i++];
+        lstmInputParams.m_InputGateBias = &constants[i++];
+    }
+
+    if (descriptor.m_PeepholeEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            lstmInputParams.m_CellToInputWeights = &constants[i++];
+        }
+        lstmInputParams.m_CellToForgetWeights = &constants[i++];
+        lstmInputParams.m_CellToOutputWeights = &constants[i++];
+    }
+
+    if (descriptor.m_ProjectionEnabled)
+    {
+        lstmInputParams.m_ProjectionWeights = &constants[i++];
+        lstmInputParams.m_ProjectionBias = &constants[i++];
+    }
+
+    if (descriptor.m_LayerNormEnabled)
+    {
+        if (!descriptor.m_CifgEnabled)
+        {
+            lstmInputParams.m_InputLayerNormWeights = &constants[i++];
+        }
+        lstmInputParams.m_ForgetLayerNormWeights = &constants[i++];
+        lstmInputParams.m_CellLayerNormWeights = &constants[i++];
+        lstmInputParams.m_OutputLayerNormWeights = &constants[i++];
+    }
+
+    return lstmInputParams;
+}
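+
+// Note: the index-based unpacking above relies on the layer emitting its
+// constants in a fixed order -- the nine always-present tensors first
+// (input-to-gate and recurrent-to-gate weights for forget/cell/output, then
+// the three gate biases), followed by the optional CIFG, peephole, projection
+// and layer-norm tensors when the descriptor enables them.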
+
+// Works for Lstm and QLstm (QuantizedLstm uses different parameters)
+template<typename Descriptor>
+class VerifyLstmLayer : public LayerVerifierBaseWithDescriptor<Descriptor>
+{
+public:
+    VerifyLstmLayer(const std::string& layerName,
+                    const std::vector<armnn::TensorInfo>& inputInfos,
+                    const std::vector<armnn::TensorInfo>& outputInfos,
+                    const Descriptor& descriptor,
+                    const armnn::LstmInputParams& inputParams)
+        : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
+        , m_InputParams(inputParams) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Lstm:
+            {
+                this->VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                this->VerifyDescriptor(internalDescriptor);
+                armnn::LstmInputParams lstmParams = ConstantVector2LstmInputParams(constants, internalDescriptor);
+                VerifyInputParameters(lstmParams);
+                break;
+            }
+            case armnn::LayerType::QLstm:
+            {
+                this->VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                this->VerifyDescriptor(internalDescriptor);
+                armnn::LstmInputParams lstmParams = ConstantVector2LstmInputParams(constants, internalDescriptor);
+                VerifyInputParameters(lstmParams);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in Lstm test model");
+            }
+        }
+    }
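+
+    // The Lstm and QLstm branches above are deliberately identical: Descriptor
+    // is the template parameter, so each instantiation only ever meets its own
+    // layer type and descriptor class.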
"m_ForgetLayerNormWeights", m_InputParams.m_ForgetLayerNormWeights, params.m_ForgetLayerNormWeights); + this->VerifyConstTensors( + "m_CellLayerNormWeights", m_InputParams.m_CellLayerNormWeights, params.m_CellLayerNormWeights); + this->VerifyConstTensors( + "m_OutputLayerNormWeights", m_InputParams.m_OutputLayerNormWeights, params.m_OutputLayerNormWeights); + } + +private: + armnn::LstmInputParams m_InputParams; +}; + +BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmCifgPeepholeNoProjection) +{ + armnn::LstmDescriptor descriptor; + descriptor.m_ActivationFunc = 4; + descriptor.m_ClippingThresProj = 0.0f; + descriptor.m_ClippingThresCell = 0.0f; + descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams + descriptor.m_ProjectionEnabled = false; + descriptor.m_PeepholeEnabled = true; + + const uint32_t batchSize = 1; + const uint32_t inputSize = 2; + const uint32_t numUnits = 4; + const uint32_t outputSize = numUnits; + + armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32); + std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); + armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData); + + std::vector inputToCellWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); + armnn::ConstTensor inputToCellWeights(inputWeightsInfo1, inputToCellWeightsData); + + std::vector inputToOutputWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); + armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData); + + armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32); + std::vector recurrentToForgetWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); + armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData); + + std::vector recurrentToCellWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); + armnn::ConstTensor recurrentToCellWeights(inputWeightsInfo2, recurrentToCellWeightsData); + + std::vector recurrentToOutputWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); + armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData); + + armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32); + std::vector cellToForgetWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); + armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData); + + std::vector cellToOutputWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); + armnn::ConstTensor cellToOutputWeights(inputWeightsInfo3, cellToOutputWeightsData); + + std::vector forgetGateBiasData(numUnits, 1.0f); + armnn::ConstTensor forgetGateBias(inputWeightsInfo3, forgetGateBiasData); + + std::vector cellBiasData(numUnits, 0.0f); + armnn::ConstTensor cellBias(inputWeightsInfo3, cellBiasData); + + std::vector outputGateBiasData(numUnits, 0.0f); + armnn::ConstTensor outputGateBias(inputWeightsInfo3, outputGateBiasData); + + armnn::LstmInputParams params; + params.m_InputToForgetWeights = &inputToForgetWeights; + params.m_InputToCellWeights = &inputToCellWeights; + params.m_InputToOutputWeights = &inputToOutputWeights; + params.m_RecurrentToForgetWeights = &recurrentToForgetWeights; + params.m_RecurrentToCellWeights = &recurrentToCellWeights; + params.m_RecurrentToOutputWeights = &recurrentToOutputWeights; + params.m_ForgetGateBias = &forgetGateBias; + 
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmCifgPeepholeNoProjection)
+{
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
+    descriptor.m_ProjectionEnabled = false;
+    descriptor.m_PeepholeEnabled = true;
+
+    const uint32_t batchSize = 1;
+    const uint32_t inputSize = 2;
+    const uint32_t numUnits = 4;
+    const uint32_t outputSize = numUnits;
+
+    armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo1, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
+
+    armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
+    armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
+    armnn::ConstTensor recurrentToCellWeights(inputWeightsInfo2, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
+    armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
+
+    armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
+    armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
+    armnn::ConstTensor cellToOutputWeights(inputWeightsInfo3, cellToOutputWeightsData);
+
+    std::vector<float> forgetGateBiasData(numUnits, 1.0f);
+    armnn::ConstTensor forgetGateBias(inputWeightsInfo3, forgetGateBiasData);
+
+    std::vector<float> cellBiasData(numUnits, 0.0f);
+    armnn::ConstTensor cellBias(inputWeightsInfo3, cellBiasData);
+
+    std::vector<float> outputGateBiasData(numUnits, 0.0f);
+    armnn::ConstTensor outputGateBias(inputWeightsInfo3, outputGateBiasData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("lstm");
+    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
+    armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3);
+
+    // connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 3 }, armnn::DataType::Float32);
+
+    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
+
+    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+        layerName,
+        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+        descriptor,
+        params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
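+
+// Note the scratch buffer shape { batchSize, numUnits * 3 } above: with CIFG
+// enabled the input gate is fused away, leaving three gate activations. The
+// non-CIFG cases below size it as { batchSize, numUnits * 4 }.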
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeAndProjection)
+{
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = false; // if this is true then we DON'T need to set the OptCifgParams
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+
+    const uint32_t batchSize = 2;
+    const uint32_t inputSize = 5;
+    const uint32_t numUnits = 20;
+    const uint32_t outputSize = 16;
+
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
+
+    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
+
+    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
+
+    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
+
+    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
+
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
+
+    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
+
+    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
+
+    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
+    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+    std::vector<float> projectionBiasData(outputSize, 0.f);
+    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // additional params because: descriptor.m_CifgEnabled = false
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_CellToInputWeights = &cellToInputWeights;
+    params.m_InputGateBias = &inputGateBias;
+
+    // additional params because: descriptor.m_ProjectionEnabled = true
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias = &projectionBias;
+
+    // additional params because: descriptor.m_PeepholeEnabled = true
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("lstm");
+    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
+    armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3);
+
+    // connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+
+    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
+
+    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+        layerName,
+        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+        descriptor,
+        params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
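+
+// The next case reuses the no-CIFG/peephole/projection configuration above and
+// additionally sets m_LayerNormEnabled, which appends the four *LayerNormWeights
+// tensors to the serialized constants.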
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm)
+{
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = false; // if this is true then we DON'T need to set the OptCifgParams
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+    descriptor.m_LayerNormEnabled = true;
+
+    const uint32_t batchSize = 2;
+    const uint32_t inputSize = 5;
+    const uint32_t numUnits = 20;
+    const uint32_t outputSize = 16;
+
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
+
+    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
+    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
+
+    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
+
+    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
+
+    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
+
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
+
+    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
+    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
+
+    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
+
+    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
+    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+    std::vector<float> projectionBiasData(outputSize, 0.f);
+    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
+
+    std::vector<float> inputLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor inputLayerNormWeights(tensorInfo20, inputLayerNormWeightsData);
+
+    std::vector<float> forgetLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor forgetLayerNormWeights(tensorInfo20, forgetLayerNormWeightsData);
+
+    std::vector<float> cellLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor cellLayerNormWeights(tensorInfo20, cellLayerNormWeightsData);
+
+    std::vector<float> outLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
+    armnn::ConstTensor outLayerNormWeights(tensorInfo20, outLayerNormWeightsData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // additional params because: descriptor.m_CifgEnabled = false
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_CellToInputWeights = &cellToInputWeights;
+    params.m_InputGateBias = &inputGateBias;
+
+    // additional params because: descriptor.m_ProjectionEnabled = true
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias = &projectionBias;
+
+    // additional params because: descriptor.m_PeepholeEnabled = true
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    // additional params because: descriptor.m_LayerNormEnabled = true
+    params.m_InputLayerNormWeights = &inputLayerNormWeights;
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outLayerNormWeights;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("lstm");
+    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
+    armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3);
+
+    // connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+
+    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
+
+    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
+
+    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
+    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+        layerName,
+        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+        descriptor,
+        params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(EnsureLstmLayersBackwardCompatibility)
+{
+    // The hex data below is a flat buffer containing an LSTM layer with no CIFG, with peephole and projection
+    // enabled. That data was obtained before additional layer normalization parameters were added to the
+    // LSTM serializer. That way it can be tested if an LSTM model with the old parameter configuration can
+    // still be loaded
+    const std::vector<uint8_t> lstmNoCifgWithPeepholeAndProjectionModel =
+    {
+        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00,
+        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+        0xDC, 0x29, 0x00, 0x00, 0x38, 0x29, 0x00, 0x00, 0xB4, 0x28, 0x00, 0x00, 0x94, 0x01, 0x00, 0x00, 0x3C, 0x01,
+        0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00,
+        0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x70, 0xD6, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x06, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x88, 0xD7,
+        0xFF, 0xFF, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF6, 0xD6, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00,
+        0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0xE8, 0xD7, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xC8, 0xD6, 0xFF, 0xFF, 0x00, 0x00,
+        0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x5E, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xE0, 0xD7, 0xFF, 0xFF,
+        0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x4E, 0xD7, 0xFF, 0xFF, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00,
+        0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xD8,
+        0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B,
+        0x04, 0x00, 0x00, 0x00, 0xB6, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x38, 0xD8, 0xFF, 0xFF, 0x08, 0x00,
+        0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0xA6, 0xD7, 0xFF, 0xFF, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xD8, 0xFF, 0xFF, + 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x78, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, + 0x00, 0x00, 0x0E, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x16, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, + 0xFA, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, + 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xD8, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x6C, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x23, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, + 0x12, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0xE0, 0x25, 0x00, 0x00, 0xD0, 0x25, + 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x00, 0x48, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, + 0x10, 0x00, 0x14, 0x00, 0x18, 0x00, 0x1C, 0x00, 0x20, 0x00, 0x24, 0x00, 0x28, 0x00, 0x2C, 0x00, 0x30, 0x00, + 0x34, 0x00, 0x38, 0x00, 0x3C, 0x00, 0x40, 0x00, 0x44, 0x00, 0x26, 0x00, 0x00, 0x00, 0xC4, 0x23, 0x00, 0x00, + 0xF8, 0x21, 0x00, 0x00, 0x2C, 0x20, 0x00, 0x00, 0xF0, 0x1A, 0x00, 0x00, 0xB4, 0x15, 0x00, 0x00, 0x78, 0x10, + 0x00, 0x00, 0xF0, 0x0F, 0x00, 0x00, 0x68, 0x0F, 0x00, 0x00, 0xE0, 0x0E, 0x00, 0x00, 0x14, 0x0D, 0x00, 0x00, + 0xD8, 0x07, 0x00, 0x00, 0x50, 0x07, 0x00, 0x00, 0xC8, 0x06, 0x00, 0x00, 0x8C, 0x01, 0x00, 0x00, 0x14, 0x01, + 0x00, 0x00, 0x8C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, + 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x72, 0xD8, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x82, 0xD9, 0xFF, 0xFF, + 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xD8, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0xF6, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x54, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x06, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 
0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xD9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6A, 0xD9, 0xFF, 0xFF, 0x00, 0x00, + 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7A, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, + 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xA2, 0xDE, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB2, 0xDF, 0xFF, 0xFF, + 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xDF, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0x26, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x36, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x92, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xAA, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, + 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xBA, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xC6, 0xE4, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xE2, 0xE4, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF2, 0xE5, 0xFF, 0xFF, 0x04, 0x00, + 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, + 0x00, 0x00, 0xAA, 
0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0xBA, 0xE7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x16, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x2E, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3E, 0xE8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xB2, 0xE7, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xC2, 0xE8, 0xFF, 0xFF, 0x04, 0x00, + 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0xE8, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, + 0x00, 0x00, 0x36, 0xE8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x46, 0xE9, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xED, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6E, 0xED, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, + 
0x04, 0x00, 0x00, 0x00, 0x7E, 0xEE, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x8A, 0xF2, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xA6, 0xF2, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, + 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB6, 0xF3, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xC2, 0xF7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xDE, 0xF7, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xF8, 0xFF, 0xFF, 0x04, 0x00, + 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xF9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, + 0x00, 0x00, 0xA6, 0xF9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0xB6, 0xFA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xFB, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x6E, 0xFB, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7E, 0xFC, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1A, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x0C, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x01, 0x04, 0x00, 0x00, 0x00, 0x2E, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 
0x00, + 0x22, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6C, 0x73, + 0x74, 0x6D, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xEC, 0x00, 0x00, 0x00, 0xD0, 0x00, 0x00, 0x00, + 0xB4, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x30, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0xA6, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x3C, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0xCE, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, 0xFF, 0xFF, 0xFF, + 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF6, 0xFD, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0xB4, 0xFE, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x1A, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, + 0xF0, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00, + 0x7E, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x76, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x68, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xCE, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x0E, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x0E, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6E, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00, + 0xF6, 0xFF, 0xFF, 0xFF, 
0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00, 0x04, 0x00, 0x06, 0x00, + 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, + 0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0C, 0x00, + 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00
+    };
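+    // The byte vector above is a pre-serialized ArmNN model (FlatBuffers format): an LSTM
+    // layer with CIFG disabled and peephole and projection enabled. Deserializing the raw
+    // bytes below checks that models serialized by earlier versions still load correctly.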
+
+    armnn::INetworkPtr deserializedNetwork =
+        DeserializeNetwork(std::string(lstmNoCifgWithPeepholeAndProjectionModel.begin(),
+                                       lstmNoCifgWithPeepholeAndProjectionModel.end()));
+
+    BOOST_CHECK(deserializedNetwork);
+
+    // Generate the same model parameters which were used to serialize the model (layer norm is not specified)
+    armnn::LstmDescriptor descriptor;
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_ClippingThresProj = 0.0f;
+    descriptor.m_ClippingThresCell = 0.0f;
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+
+    const uint32_t batchSize = 2u;
+    const uint32_t inputSize = 5u;
+    const uint32_t numUnits = 20u;
+    const uint32_t outputSize = 16u;
+
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+    std::vector<float> inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
+
+    std::vector<float> inputToForgetWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
+
+    std::vector<float> inputToCellWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
+
+    std::vector<float> inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+    std::vector<float> inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
+
+    std::vector<float> forgetGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
+
+    std::vector<float> cellBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
+
+    std::vector<float> outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
+
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+    std::vector<float> recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
+
+    std::vector<float> recurrentToForgetWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
+
+    std::vector<float> recurrentToCellWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
+
+    std::vector<float> recurrentToOutputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
+    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
+
+    std::vector<float> cellToInputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
+
+    std::vector<float> cellToForgetWeightsData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
+
+    std::vector<float> cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
+    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
+
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+    std::vector<float> projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f);
+    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+    std::vector<float> projectionBiasData(outputSize, 0.0f);
+    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
+
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // additional params because: descriptor.m_CifgEnabled = false
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_CellToInputWeights = &cellToInputWeights;
+    params.m_InputGateBias = &inputGateBias;
+
+    // additional params because: descriptor.m_ProjectionEnabled = true
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias = &projectionBias;
+
+    // additional params because: descriptor.m_PeepholeEnabled = true
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    const std::string layerName("lstm");
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits }, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
+    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+
+    VerifyLstmLayer<armnn::LstmDescriptor> checker(
+        layerName,
+        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
+        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+        descriptor,
+        params);
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
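+// ExecuteStrategy hands a layer's constant tensors over as one flat vector; this helper
+// rebuilds QuantizedLstmInputParams from that vector, reading the entries in the same
+// order in which the QuantizedLstm layer exposes its constants.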
+armnn::QuantizedLstmInputParams ConstantsVector2QuantizedLstmInputParams(
+    const std::vector<armnn::ConstTensor>& constants)
+{
+    armnn::QuantizedLstmInputParams params;
+
+    // index for constants vector
+    size_t i = 0;
+
+    // Get input parameters
+    params.m_InputToInputWeights = &constants[i++];
+    params.m_InputToForgetWeights = &constants[i++];
+    params.m_InputToCellWeights = &constants[i++];
+    params.m_InputToOutputWeights = &constants[i++];
+
+    params.m_RecurrentToInputWeights = &constants[i++];
+    params.m_RecurrentToForgetWeights = &constants[i++];
+    params.m_RecurrentToCellWeights = &constants[i++];
+    params.m_RecurrentToOutputWeights = &constants[i++];
+
+    params.m_InputGateBias = &constants[i++];
+    params.m_ForgetGateBias = &constants[i++];
+    params.m_CellBias = &constants[i++];
+    params.m_OutputGateBias = &constants[i++];
+
+    return params;
+}
+
+class VerifyQuantizedLstmLayer : public LayerVerifierBase
+{
+public:
+    VerifyQuantizedLstmLayer(const std::string& layerName,
+                             const std::vector<armnn::TensorInfo>& inputInfos,
+                             const std::vector<armnn::TensorInfo>& outputInfos,
+                             const armnn::QuantizedLstmInputParams& inputParams)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos), m_InputParams(inputParams) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::QuantizedLstm:
+            {
+                VerifyNameAndConnections(layer, name);
+                armnn::QuantizedLstmInputParams params = ConstantsVector2QuantizedLstmInputParams(constants);
+                VerifyInputParameters(params);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception(fmt::format("Unexpected layer type in QuantizedLstm test model: {}",
+                                                   layer->GetName()));
+            }
+        }
+    }
+
+protected:
+    void VerifyInputParameters(const armnn::QuantizedLstmInputParams& params)
+    {
+        VerifyConstTensors("m_InputToInputWeights",
+                           m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
+        VerifyConstTensors("m_InputToForgetWeights",
+                           m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
+        VerifyConstTensors("m_InputToCellWeights",
+                           m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
+        VerifyConstTensors("m_InputToOutputWeights",
+                           m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
+        VerifyConstTensors("m_RecurrentToInputWeights",
+                           m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
+        VerifyConstTensors("m_RecurrentToForgetWeights",
+                           m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
+        VerifyConstTensors("m_RecurrentToCellWeights",
+                           m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
+        VerifyConstTensors("m_RecurrentToOutputWeights",
+                           m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
+        VerifyConstTensors("m_InputGateBias",
+                           m_InputParams.m_InputGateBias, params.m_InputGateBias);
+        VerifyConstTensors("m_ForgetGateBias",
+                           m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
+        VerifyConstTensors("m_CellBias",
+                           m_InputParams.m_CellBias, params.m_CellBias);
+        VerifyConstTensors("m_OutputGateBias",
+                           m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
+    }
+
+private:
+    armnn::QuantizedLstmInputParams m_InputParams;
+};
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
+{
+    const uint32_t batchSize = 1;
+    const uint32_t inputSize = 2;
+    const uint32_t numUnits = 4;
+    const uint32_t outputSize = numUnits;
+
+    // Scale/Offset for input/output, cellState In/Out, weights, bias
+    float inputOutputScale = 0.0078125f;
+    int32_t inputOutputOffset = 128;
+
+    float cellStateScale = 0.00048828125f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale = 0.00408021f;
+    int32_t weightsOffset = 100;
+
+    float biasScale = 3.1876640625e-05f;
+    int32_t biasOffset = 0;
+
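+    // These follow the usual affine mapping real = scale * (quantized - offset); for
+    // example a weight byte of 150 decodes to roughly 0.00408021f * (150 - 100) = 0.204f.
+    // biasScale is inputOutputScale * weightsScale (0.0078125f * 0.00408021f
+    // = 3.1876640625e-05f), the customary convention for Signed32 bias tensors.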
+    // The shape of weight data is {outputSize, inputSize} = {4, 2}
+    armnn::TensorShape inputToInputWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
+                                              armnn::DataType::QAsymmU8,
+                                              weightsScale,
+                                              weightsOffset);
+    armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
+
+    armnn::TensorShape inputToForgetWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
+                                               armnn::DataType::QAsymmU8,
+                                               weightsScale,
+                                               weightsOffset);
+    armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
+
+    armnn::TensorShape inputToCellWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
+                                             armnn::DataType::QAsymmU8,
+                                             weightsScale,
+                                             weightsOffset);
+    armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
+
+    armnn::TensorShape inputToOutputWeightsShape = {4, 2};
+    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
+    armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
+                                               armnn::DataType::QAsymmU8,
+                                               weightsScale,
+                                               weightsOffset);
+    armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
+
+    // The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
+    armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
+                                                  armnn::DataType::QAsymmU8,
+                                                  weightsScale,
+                                                  weightsOffset);
+    armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
+
+    armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
+                                                   armnn::DataType::QAsymmU8,
+                                                   weightsScale,
+                                                   weightsOffset);
+    armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
+
+    armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
+                                                 armnn::DataType::QAsymmU8,
+                                                 weightsScale,
+                                                 weightsOffset);
+    armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
+
+    armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
+    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+    armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
+                                                   armnn::DataType::QAsymmU8,
+                                                   weightsScale,
+                                                   weightsOffset);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
+
+    // The shape of bias data is {outputSize} = {4}
+    armnn::TensorShape inputGateBiasShape = {4};
+    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
+                                        armnn::DataType::Signed32,
+                                        biasScale,
+                                        biasOffset);
+    armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
+
+    armnn::TensorShape forgetGateBiasShape = {4};
+    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
+                                         armnn::DataType::Signed32,
+                                         biasScale,
+                                         biasOffset);
+    armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
+
+    armnn::TensorShape cellBiasShape = {4};
+    std::vector<int32_t> cellBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo cellBiasInfo(cellBiasShape,
+                                   armnn::DataType::Signed32,
+                                   biasScale,
+                                   biasOffset);
+    armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
+
+    armnn::TensorShape outputGateBiasShape = {4};
+    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4};
+    armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
+                                         armnn::DataType::Signed32,
+                                         biasScale,
+                                         biasOffset);
+    armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
+
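+    // Bundle the constants into QuantizedLstmInputParams, build a minimal graph around a
+    // QuantizedLstm layer (input, cellStateIn, outputStateIn -> cellStateOut, output),
+    // round-trip it through the serializer, and verify with the checker defined above.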
+    armnn::QuantizedLstmInputParams params;
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+    params.m_InputGateBias = &inputGateBias;
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
+    const std::string layerName("QuantizedLstm");
+    armnn::IConnectableLayer* const quantizedLstmLayer = network->AddQuantizedLstmLayer(params, layerName.c_str());
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(1);
+
+    // Connect up
+    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
+                                      armnn::DataType::QAsymmU8,
+                                      inputOutputScale,
+                                      inputOutputOffset);
+    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
+                                          armnn::DataType::QSymmS16,
+                                          cellStateScale,
+                                          cellStateOffset);
+    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
+                                            armnn::DataType::QAsymmU8,
+                                            inputOutputScale,
+                                            inputOutputOffset);
+
+    inputLayer->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(1));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(2));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
+
+    quantizedLstmLayer->GetOutputSlot(0).Connect(cellStateOut->GetInputSlot(0));
+    quantizedLstmLayer->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
+
+    quantizedLstmLayer->GetOutputSlot(1).Connect(outputLayer->GetInputSlot(0));
+    quantizedLstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyQuantizedLstmLayer checker(layerName,
+                                     {inputTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
+                                     {cellStateTensorInfo, outputStateTensorInfo},
+                                     params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
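+// Basic QLstm round trip: CIFG enabled (so no input-gate parameters), with peephole,
+// projection and layer normalization all disabled.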
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmBasic)
+{
+    armnn::QLstmDescriptor descriptor;
+
+    descriptor.m_CifgEnabled = true;
+    descriptor.m_ProjectionEnabled = false;
+    descriptor.m_PeepholeEnabled = false;
+    descriptor.m_LayerNormEnabled = false;
+
+    descriptor.m_CellClip = 0.0f;
+    descriptor.m_ProjectionClip = 0.0f;
+
+    descriptor.m_InputIntermediateScale = 0.00001f;
+    descriptor.m_ForgetIntermediateScale = 0.00001f;
+    descriptor.m_CellIntermediateScale = 0.00001f;
+    descriptor.m_OutputIntermediateScale = 0.00001f;
+
+    descriptor.m_HiddenStateScale = 0.07f;
+    descriptor.m_HiddenStateZeroPoint = 0;
+
+    const unsigned int numBatches = 2;
+    const unsigned int inputSize = 5;
+    const unsigned int outputSize = 4;
+    const unsigned int numUnits = 4;
+
+    // Scale/Offset quantization info
+    float inputScale = 0.0078f;
+    int32_t inputOffset = 0;
+
+    float outputScale = 0.0078f;
+    int32_t outputOffset = 0;
+
+    float cellStateScale = 3.5002e-05f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale = 0.007f;
+    int32_t weightsOffset = 0;
+
+    float biasScale = 3.5002e-05f / 1024;
+    int32_t biasOffset = 0;
+
+    // Weights and bias tensor and quantization info
+    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
+                                       armnn::DataType::QSymmS8,
+                                       weightsScale,
+                                       weightsOffset);
+
+    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
+                                           armnn::DataType::QSymmS8,
+                                           weightsScale,
+                                           weightsOffset);
+
+    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
+
+    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
+
+    std::vector<int8_t> recurrentToForgetWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToCellWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToOutputWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
+    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
+    std::vector<int32_t> cellBiasData(numUnits, 0);
+    std::vector<int32_t> outputGateBiasData(numUnits, 0);
+
+    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
+    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
+    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
+
+    // Set up params
+    armnn::LstmInputParams params;
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // Create network
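+    // Three inputs (input, outputStateIn, cellStateIn) and three outputs
+    // (outputStateOut, cellStateOut, output) are wired around the QLstm layer below.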
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    const std::string layerName("qLstm");
+
+    armnn::IConnectableLayer* const input = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(2);
+
+    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
+
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(2);
+
+    // Input/Output tensor info
+    armnn::TensorInfo inputInfo({numBatches , inputSize},
+                                armnn::DataType::QAsymmS8,
+                                inputScale,
+                                inputOffset);
+
+    armnn::TensorInfo cellStateInfo({numBatches , numUnits},
+                                    armnn::DataType::QSymmS16,
+                                    cellStateScale,
+                                    cellStateOffset);
+
+    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
+                                      armnn::DataType::QAsymmS8,
+                                      outputScale,
+                                      outputOffset);
+
+    // Connect input/output slots
+    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::QLstmDescriptor> checker(
+        layerName,
+        {inputInfo, cellStateInfo, outputStateInfo},
+        {outputStateInfo, cellStateInfo, outputStateInfo},
+        descriptor,
+        params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmCifgLayerNorm)
+{
+    armnn::QLstmDescriptor descriptor;
+
+    // CIFG params are used when CIFG is disabled
+    descriptor.m_CifgEnabled = true;
+    descriptor.m_ProjectionEnabled = false;
+    descriptor.m_PeepholeEnabled = false;
+    descriptor.m_LayerNormEnabled = true;
+
+    descriptor.m_CellClip = 0.0f;
+    descriptor.m_ProjectionClip = 0.0f;
+
+    descriptor.m_InputIntermediateScale = 0.00001f;
+    descriptor.m_ForgetIntermediateScale = 0.00001f;
+    descriptor.m_CellIntermediateScale = 0.00001f;
+    descriptor.m_OutputIntermediateScale = 0.00001f;
+
+    descriptor.m_HiddenStateScale = 0.07f;
+    descriptor.m_HiddenStateZeroPoint = 0;
+
+    const unsigned int numBatches = 2;
+    const unsigned int inputSize = 5;
+    const unsigned int outputSize = 4;
+    const unsigned int numUnits = 4;
+
+    // Scale/Offset quantization info
+    float inputScale = 0.0078f;
+    int32_t inputOffset = 0;
+
+    float outputScale = 0.0078f;
+    int32_t outputOffset = 0;
+
+    float cellStateScale = 3.5002e-05f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale = 0.007f;
+    int32_t weightsOffset = 0;
+
+    float layerNormScale = 3.5002e-05f;
+    int32_t layerNormOffset = 0;
+
+    float biasScale = layerNormScale / 1024;
+    int32_t biasOffset = 0;
+
+    // Weights and bias tensor and quantization info
+    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
+                                       armnn::DataType::QSymmS8,
+                                       weightsScale,
+                                       weightsOffset);
+
+    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
+                                           armnn::DataType::QSymmS8,
+                                           weightsScale,
+                                           weightsOffset);
+
+    armnn::TensorInfo biasInfo({numUnits},
+                               armnn::DataType::Signed32,
+                               biasScale,
+                               biasOffset);
+
+    armnn::TensorInfo layerNormWeightsInfo({numUnits},
+                                           armnn::DataType::QSymmS16,
+                                           layerNormScale,
+                                           layerNormOffset);
+
+    // Mandatory params
+    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
+
+    std::vector<int8_t> recurrentToForgetWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToCellWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToOutputWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
+    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
+    std::vector<int32_t> cellBiasData(numUnits, 0);
+    std::vector<int32_t> outputGateBiasData(numUnits, 0);
+
+    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
+    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
+    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
+
+    // Layer Norm
+    std::vector<int16_t> forgetLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> outputLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
+    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
+    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
+
+    // Set up params
+    armnn::LstmInputParams params;
+
+    // Mandatory params
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // Layer Norm
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
+
+    // Create network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    const std::string layerName("qLstm");
+
+    armnn::IConnectableLayer* const input = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(2);
+
+    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
+
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(2);
+
+    // Input/Output tensor info
+    armnn::TensorInfo inputInfo({numBatches , inputSize},
+                                armnn::DataType::QAsymmS8,
+                                inputScale,
+                                inputOffset);
+
+    armnn::TensorInfo cellStateInfo({numBatches , numUnits},
+                                    armnn::DataType::QSymmS16,
+                                    cellStateScale,
+                                    cellStateOffset);
+
+    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
+                                      armnn::DataType::QAsymmS8,
+                                      outputScale,
+                                      outputOffset);
+
+    // Connect input/output slots
+    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::QLstmDescriptor> checker(layerName,
+                                                    {inputInfo, cellStateInfo, outputStateInfo},
+                                                    {outputStateInfo, cellStateInfo, outputStateInfo},
+                                                    descriptor,
+                                                    params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmAdvanced)
+{
+    armnn::QLstmDescriptor descriptor;
+
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_ProjectionEnabled = true;
+    descriptor.m_PeepholeEnabled = true;
+    descriptor.m_LayerNormEnabled = true;
+
+    descriptor.m_CellClip = 0.1f;
+    descriptor.m_ProjectionClip = 0.1f;
+
+    descriptor.m_InputIntermediateScale = 0.00001f;
+    descriptor.m_ForgetIntermediateScale = 0.00001f;
+    descriptor.m_CellIntermediateScale = 0.00001f;
+    descriptor.m_OutputIntermediateScale = 0.00001f;
+
+    descriptor.m_HiddenStateScale = 0.07f;
+    descriptor.m_HiddenStateZeroPoint = 0;
+
+    const unsigned int numBatches = 2;
+    const unsigned int inputSize = 5;
+    const unsigned int outputSize = 4;
+    const unsigned int numUnits = 4;
+
+    // Scale/Offset quantization info
+    float inputScale = 0.0078f;
+    int32_t inputOffset = 0;
+
+    float outputScale = 0.0078f;
+    int32_t outputOffset = 0;
+
+    float cellStateScale = 3.5002e-05f;
+    int32_t cellStateOffset = 0;
+
+    float weightsScale = 0.007f;
+    int32_t weightsOffset = 0;
+
+    float layerNormScale = 3.5002e-05f;
+    int32_t layerNormOffset = 0;
+
+    float biasScale = layerNormScale / 1024;
+    int32_t biasOffset = 0;
+
+    // Weights and bias tensor and quantization info
+    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
+                                       armnn::DataType::QSymmS8,
+                                       weightsScale,
+                                       weightsOffset);
+
+    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
+                                           armnn::DataType::QSymmS8,
+                                           weightsScale,
+                                           weightsOffset);
+
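+    // QLstmAdvanced exercises every optional feature at once: CIFG is disabled (so
+    // the input gate tensors become mandatory) while peephole, projection and layer
+    // normalisation are enabled, each contributing the extra tensors set up below.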
+    armnn::TensorInfo biasInfo({numUnits},
+                               armnn::DataType::Signed32,
+                               biasScale,
+                               biasOffset);
+
+    armnn::TensorInfo peepholeWeightsInfo({numUnits},
+                                          armnn::DataType::QSymmS16,
+                                          weightsScale,
+                                          weightsOffset);
+
+    armnn::TensorInfo layerNormWeightsInfo({numUnits},
+                                           armnn::DataType::QSymmS16,
+                                           layerNormScale,
+                                           layerNormOffset);
+
+    armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
+                                            armnn::DataType::QSymmS8,
+                                            weightsScale,
+                                            weightsOffset);
+
+    // Mandatory params
+    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
+    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
+    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
+
+    std::vector<int8_t> recurrentToForgetWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToCellWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToOutputWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
+    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
+    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
+
+    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
+    std::vector<int32_t> cellBiasData(numUnits, 0);
+    std::vector<int32_t> outputGateBiasData(numUnits, 0);
+
+    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
+    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
+    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
+
+    // CIFG
+    std::vector<int8_t> inputToInputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
+    std::vector<int8_t> recurrentToInputWeightsData =
+        GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
+    std::vector<int32_t> inputGateBiasData(numUnits, 1);
+
+    armnn::ConstTensor inputToInputWeights(inputWeightsInfo, inputToInputWeightsData);
+    armnn::ConstTensor recurrentToInputWeights(recurrentWeightsInfo, recurrentToInputWeightsData);
+    armnn::ConstTensor inputGateBias(biasInfo, inputGateBiasData);
+
+    // Peephole
+    std::vector<int16_t> cellToInputWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellToForgetWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellToOutputWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor cellToInputWeights(peepholeWeightsInfo, cellToInputWeightsData);
+    armnn::ConstTensor cellToForgetWeights(peepholeWeightsInfo, cellToForgetWeightsData);
+    armnn::ConstTensor cellToOutputWeights(peepholeWeightsInfo, cellToOutputWeightsData);
+
+    // Projection
+    std::vector<int8_t> projectionWeightsData = GenerateRandomData<int8_t>(projectionWeightsInfo.GetNumElements());
+    std::vector<int32_t> projectionBiasData(outputSize, 1);
+
+    armnn::ConstTensor projectionWeights(projectionWeightsInfo, projectionWeightsData);
+    armnn::ConstTensor projectionBias(biasInfo, projectionBiasData);
+
+    // Layer Norm
+    std::vector<int16_t> inputLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> forgetLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> cellLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+    std::vector<int16_t> outputLayerNormWeightsData =
+        GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
+
+    armnn::ConstTensor inputLayerNormWeights(layerNormWeightsInfo, inputLayerNormWeightsData);
+    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
+    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
+    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
+
+    // Set up params
+    armnn::LstmInputParams params;
+
+    // Mandatory params
+    params.m_InputToForgetWeights = &inputToForgetWeights;
+    params.m_InputToCellWeights = &inputToCellWeights;
+    params.m_InputToOutputWeights = &inputToOutputWeights;
+
+    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
+    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+    params.m_ForgetGateBias = &forgetGateBias;
+    params.m_CellBias = &cellBias;
+    params.m_OutputGateBias = &outputGateBias;
+
+    // CIFG
+    params.m_InputToInputWeights = &inputToInputWeights;
+    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
+    params.m_InputGateBias = &inputGateBias;
+
+    // Peephole
+    params.m_CellToInputWeights = &cellToInputWeights;
+    params.m_CellToForgetWeights = &cellToForgetWeights;
+    params.m_CellToOutputWeights = &cellToOutputWeights;
+
+    // Projection
+    params.m_ProjectionWeights = &projectionWeights;
+    params.m_ProjectionBias = &projectionBias;
+
+    // Layer Norm
+    params.m_InputLayerNormWeights = &inputLayerNormWeights;
+    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
+    params.m_CellLayerNormWeights = &cellLayerNormWeights;
+    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
+
+    // Create network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    const std::string layerName("qLstm");
+
+    armnn::IConnectableLayer* const input = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(2);
+
+    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
+
+    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(1);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(2);
+
+    // Input/Output tensor info
+    armnn::TensorInfo inputInfo({numBatches , inputSize},
+                                armnn::DataType::QAsymmS8,
+                                inputScale,
+                                inputOffset);
+
+    armnn::TensorInfo cellStateInfo({numBatches , numUnits},
+                                    armnn::DataType::QSymmS16,
+                                    cellStateScale,
+                                    cellStateOffset);
+
+    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
+                                      armnn::DataType::QAsymmS8,
+                                      outputScale,
+                                      outputOffset);
+
+    // Connect input/output slots
+    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
+    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
+
+    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
+    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
+
+    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
+
+    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
+    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyLstmLayer<armnn::QLstmDescriptor> checker(layerName,
+                                                    {inputInfo, cellStateInfo, outputStateInfo},
+                                                    {outputStateInfo, cellStateInfo, outputStateInfo},
+                                                    descriptor,
+                                                    params);
+
+    deserializedNetwork->ExecuteStrategy(checker);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/test/SerializerTestUtils.cpp b/src/armnnSerializer/test/SerializerTestUtils.cpp
new file mode 100644
index 0000000000..586d2a05a5
--- /dev/null
+++ b/src/armnnSerializer/test/SerializerTestUtils.cpp
@@ -0,0 +1,163 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SerializerTestUtils.hpp"
+#include "../Serializer.hpp"
+
+using armnnDeserializer::IDeserializer;
+
+LayerVerifierBase::LayerVerifierBase(const std::string& layerName,
+                                     const std::vector<armnn::TensorInfo>& inputInfos,
+                                     const std::vector<armnn::TensorInfo>& outputInfos)
+    : m_LayerName(layerName)
+    , m_InputTensorInfos(inputInfos)
+    , m_OutputTensorInfos(outputInfos)
+{}
+
+void LayerVerifierBase::ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                        const armnn::BaseDescriptor& descriptor,
+                                        const std::vector<armnn::ConstTensor>& constants,
+                                        const char* name,
+                                        const armnn::LayerBindingId id)
+{
+    armnn::IgnoreUnused(descriptor, constants, id);
+    switch (layer->GetType())
+    {
+        case armnn::LayerType::Input: break;
+        case armnn::LayerType::Output: break;
+        default:
+        {
+            VerifyNameAndConnections(layer, name);
+        }
+    }
+}
+
+
+void LayerVerifierBase::VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
+{
+    BOOST_TEST(name == m_LayerName.c_str());
+
+    BOOST_TEST(layer->GetNumInputSlots() == m_InputTensorInfos.size());
+    BOOST_TEST(layer->GetNumOutputSlots() == m_OutputTensorInfos.size());
+
+    for (unsigned int i = 0; i < m_InputTensorInfos.size(); i++)
+    {
+        const armnn::IOutputSlot* connectedOutput = layer->GetInputSlot(i).GetConnection();
+        BOOST_CHECK(connectedOutput);
+
+        const armnn::TensorInfo& connectedInfo = connectedOutput->GetTensorInfo();
+        BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
+        BOOST_TEST(
+            GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
+
+        BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
+        BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
+    }
+
+    for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
+    {
+        const armnn::TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+        BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
+        BOOST_TEST(
+            GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
+
+        BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
+        BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
+    }
+}
+
+void LayerVerifierBase::VerifyConstTensors(const std::string& tensorName,
+                                           const armnn::ConstTensor* expectedPtr,
+                                           const armnn::ConstTensor* actualPtr)
+{
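+    // A null expected pointer means the layer should not carry this optional tensor;
+    // otherwise the actual tensor must exist and match in shape, type and raw bytes.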
+    if (expectedPtr == nullptr)
+    {
+        BOOST_CHECK_MESSAGE(actualPtr == nullptr, tensorName + " should not exist");
+    }
+    else
+    {
+        BOOST_CHECK_MESSAGE(actualPtr != nullptr, tensorName + " should have been set");
+        if (actualPtr != nullptr)
+        {
+            const armnn::TensorInfo& expectedInfo = expectedPtr->GetInfo();
+            const armnn::TensorInfo& actualInfo = actualPtr->GetInfo();
+
+            BOOST_CHECK_MESSAGE(expectedInfo.GetShape() == actualInfo.GetShape(),
+                                tensorName + " shapes don't match");
+            BOOST_CHECK_MESSAGE(
+                GetDataTypeName(expectedInfo.GetDataType()) == GetDataTypeName(actualInfo.GetDataType()),
+                tensorName + " data types don't match");
+
+            BOOST_CHECK_MESSAGE(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes(),
+                                tensorName + " (GetNumBytes) data sizes do not match");
+            if (expectedPtr->GetNumBytes() == actualPtr->GetNumBytes())
+            {
+                //check the data is identical
+                const char* expectedData = static_cast<const char*>(expectedPtr->GetMemoryArea());
+                const char* actualData = static_cast<const char*>(actualPtr->GetMemoryArea());
+                bool same = true;
+                for (unsigned int i = 0; i < expectedPtr->GetNumBytes(); ++i)
+                {
+                    same = expectedData[i] == actualData[i];
+                    if (!same)
+                    {
+                        break;
+                    }
+                }
+                BOOST_CHECK_MESSAGE(same, tensorName + " data does not match");
+            }
+        }
+    }
+}
+
+void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2)
+{
+    BOOST_TEST(tensor1.GetShape() == tensor2.GetShape());
+    BOOST_TEST(GetDataTypeName(tensor1.GetDataType()) == GetDataTypeName(tensor2.GetDataType()));
+
+    switch (tensor1.GetDataType())
+    {
+        case armnn::DataType::Float32:
+            CompareConstTensorData<const float*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        case armnn::DataType::QAsymmU8:
+        case armnn::DataType::Boolean:
+            CompareConstTensorData<const uint8_t*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        case armnn::DataType::QSymmS8:
+            CompareConstTensorData<const int8_t*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        case armnn::DataType::Signed32:
+            CompareConstTensorData<const int32_t*>(
+                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
+            break;
+        default:
+            // Note that Float16 is not yet implemented
+            BOOST_TEST_MESSAGE("Unexpected datatype");
+            BOOST_TEST(false);
+    }
+}
+
+armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
+{
+    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+    return IDeserializer::Create()->CreateNetworkFromBinary(serializerVector);
+}
+
+std::string SerializeNetwork(const armnn::INetwork& network)
+{
+    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
+
+    serializer->Serialize(network);
+
+    std::stringstream stream;
+    serializer->SaveSerializedToStream(stream);
+
+    std::string serializerString{stream.str()};
+    return serializerString;
+}
diff --git a/src/armnnSerializer/test/SerializerTestUtils.hpp b/src/armnnSerializer/test/SerializerTestUtils.hpp
new file mode 100644
index 0000000000..e085d2ef15
--- /dev/null
+++ b/src/armnnSerializer/test/SerializerTestUtils.hpp
@@ -0,0 +1,167 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/IStrategy.hpp>
+#include <armnn/LstmParams.hpp>
+#include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <armnnSerializer/ISerializer.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
+
+#include <random>
+
+
+armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString);
+
+std::string SerializeNetwork(const armnn::INetwork& network);
+
+void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2);
+
+class LayerVerifierBase : public armnn::IStrategy
+{
+public:
+    LayerVerifierBase(const std::string& layerName,
+                      const std::vector<armnn::TensorInfo>& inputInfos,
+                      const std::vector<armnn::TensorInfo>& outputInfos);
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override;
+
+protected:
+    void VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name);
+
+    void VerifyConstTensors(const std::string& tensorName,
+                            const armnn::ConstTensor* expectedPtr,
+                            const armnn::ConstTensor* actualPtr);
+
+private:
+    std::string m_LayerName;
+    std::vector<armnn::TensorInfo> m_InputTensorInfos;
+    std::vector<armnn::TensorInfo> m_OutputTensorInfos;
+};
+
+template<typename Descriptor>
+class LayerVerifierBaseWithDescriptor : public LayerVerifierBase
+{
+public:
+    LayerVerifierBaseWithDescriptor(const std::string& layerName,
+                                    const std::vector<armnn::TensorInfo>& inputInfos,
+                                    const std::vector<armnn::TensorInfo>& outputInfos,
+                                    const Descriptor& descriptor)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos)
+        , m_Descriptor(descriptor) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            default:
+            {
+                VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                VerifyDescriptor(internalDescriptor);
+                break;
+            }
+        }
+    }
+
+protected:
+    void VerifyDescriptor(const Descriptor& descriptor)
+    {
+        BOOST_CHECK(descriptor == m_Descriptor);
+    }
+
+    Descriptor m_Descriptor;
+};
+
+template<typename T>
+void CompareConstTensorData(const void* data1, const void* data2, unsigned int numElements)
+{
+    T typedData1 = static_cast<T>(data1);
+    T typedData2 = static_cast<T>(data2);
+    BOOST_CHECK(typedData1);
+    BOOST_CHECK(typedData2);
+
+    for (unsigned int i = 0; i < numElements; i++)
+    {
+        BOOST_TEST(typedData1[i] == typedData2[i]);
+    }
+}
+
+
+template<typename Descriptor>
+class LayerVerifierBaseWithDescriptorAndConstants : public LayerVerifierBaseWithDescriptor<Descriptor>
+{
+public:
+    LayerVerifierBaseWithDescriptorAndConstants(const std::string& layerName,
+                                                const std::vector<armnn::TensorInfo>& inputInfos,
+                                                const std::vector<armnn::TensorInfo>& outputInfos,
+                                                const Descriptor& descriptor,
+                                                const std::vector<armnn::ConstTensor>& constants)
+        : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
+        , m_Constants(constants) {}
+
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(id);
+
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            default:
+            {
+                this->VerifyNameAndConnections(layer, name);
+                const Descriptor& internalDescriptor = static_cast<const Descriptor&>(descriptor);
+                this->VerifyDescriptor(internalDescriptor);
+
+                for(std::size_t i = 0; i < constants.size(); i++)
+                {
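+                    // Compare each reported constant against the expected tensor at the same index.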
+                    CompareConstTensor(constants[i], m_Constants[i]);
+                }
+            }
+        }
+    }
+
+private:
+    std::vector<armnn::ConstTensor> m_Constants;
+};
+
+template<typename DataType>
+static std::vector<DataType> GenerateRandomData(size_t size)
+{
+    constexpr bool isIntegerType = std::is_integral<DataType>::value;
+    using Distribution =
+        typename std::conditional<isIntegerType,
+                                  std::uniform_int_distribution<DataType>,
+                                  std::uniform_real_distribution<DataType>>::type;
+
+    static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min();
+    static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
+
+    static Distribution distribution(lowerLimit, upperLimit);
+    static std::default_random_engine generator;
+
+    std::vector<DataType> randomData(size);
+    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
+
+    return randomData;
+}
\ No newline at end of file
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 44e8a3898e..f261731a75 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -4,6 +4,7 @@
 //
 #include "../Serializer.hpp"
+#include "SerializerTestUtils.hpp"
 #include
 #include
@@ -11,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -19,264 +21,36 @@
 using armnnDeserializer::IDeserializer;
-namespace
-{
-
-#define DECLARE_LAYER_VERIFIER_CLASS(name) \
-class name##LayerVerifier : public LayerVerifierBase \
-{ \
-public: \
-    name##LayerVerifier(const std::string& layerName, \
-                        const std::vector<armnn::TensorInfo>& inputInfos, \
-                        const std::vector<armnn::TensorInfo>& outputInfos) \
-        : LayerVerifierBase(layerName, inputInfos, outputInfos) {} \
-\
-    void Visit##name##Layer(const armnn::IConnectableLayer* layer, const char* name) override \
-    { \
-        VerifyNameAndConnections(layer, name); \
-    } \
-};
-
-#define DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(name) \
-class name##LayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::name##Descriptor> \
-{ \
-public: \
-    name##LayerVerifier(const std::string& layerName, \
-                        const std::vector<armnn::TensorInfo>& inputInfos, \
-                        const std::vector<armnn::TensorInfo>& outputInfos, \
-                        const armnn::name##Descriptor& descriptor) \
-        : LayerVerifierBaseWithDescriptor<armnn::name##Descriptor>( \
-            layerName, inputInfos, outputInfos, descriptor) {} \
-\
-    void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
-                            const armnn::name##Descriptor& descriptor, \
-                            const char* name) override \
-    { \
-        VerifyNameAndConnections(layer, name); \
-        VerifyDescriptor(descriptor); \
-    } \
-};
-
-struct DefaultLayerVerifierPolicy
-{
-    static void Apply(const std::string)
-    {
-        BOOST_TEST_MESSAGE("Unexpected layer found in network");
-        BOOST_TEST(false);
-    }
-};
-
-class LayerVerifierBase : public armnn::LayerVisitorBase<DefaultLayerVerifierPolicy>
-{
-public:
-    LayerVerifierBase(const std::string& layerName,
-                      const std::vector<armnn::TensorInfo>& inputInfos,
-                      const std::vector<armnn::TensorInfo>& outputInfos)
-        : m_LayerName(layerName)
-        , m_InputTensorInfos(inputInfos)
-        , m_OutputTensorInfos(outputInfos) {}
-
-    void VisitInputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
-
-    void VisitOutputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
-
-protected:
-    void VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
-    {
-        BOOST_TEST(name == m_LayerName.c_str());
-
-        BOOST_TEST(layer->GetNumInputSlots() == m_InputTensorInfos.size());
-        BOOST_TEST(layer->GetNumOutputSlots() == m_OutputTensorInfos.size());
-
-        for (unsigned int i = 0; i < m_InputTensorInfos.size(); i++)
-        {
-            const armnn::IOutputSlot* connectedOutput = layer->GetInputSlot(i).GetConnection();
-            BOOST_CHECK(connectedOutput);
-
-            const armnn::TensorInfo& connectedInfo = connectedOutput->GetTensorInfo();
-            BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
-            BOOST_TEST(
-                GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
-
-            BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
-            BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
-        }
-
-        for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
-        {
-            const armnn::TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
-            BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
-            BOOST_TEST(
-                GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
-
-            BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
-            BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
-        }
-    }
-
-    void VerifyConstTensors(const std::string& tensorName,
-                            const armnn::ConstTensor* expectedPtr,
-                            const armnn::ConstTensor* actualPtr)
-    {
-        if (expectedPtr == nullptr)
-        {
-            BOOST_CHECK_MESSAGE(actualPtr == nullptr, tensorName + " should not exist");
-        }
-        else
-        {
-            BOOST_CHECK_MESSAGE(actualPtr != nullptr, tensorName + " should have been set");
-            if (actualPtr != nullptr)
-            {
-                const armnn::TensorInfo& expectedInfo = expectedPtr->GetInfo();
-                const armnn::TensorInfo& actualInfo = actualPtr->GetInfo();
-
-                BOOST_CHECK_MESSAGE(expectedInfo.GetShape() == actualInfo.GetShape(),
-                                    tensorName + " shapes don't match");
-                BOOST_CHECK_MESSAGE(
-                    GetDataTypeName(expectedInfo.GetDataType()) == GetDataTypeName(actualInfo.GetDataType()),
-                    tensorName + " data types don't match");
-
-                BOOST_CHECK_MESSAGE(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes(),
-                                    tensorName + " (GetNumBytes) data sizes do not match");
-                if (expectedPtr->GetNumBytes() == actualPtr->GetNumBytes())
-                {
-                    //check the data is identical
-                    const char* expectedData = static_cast<const char*>(expectedPtr->GetMemoryArea());
-                    const char* actualData = static_cast<const char*>(actualPtr->GetMemoryArea());
-                    bool same = true;
-                    for (unsigned int i = 0; i < expectedPtr->GetNumBytes(); ++i)
-                    {
-                        same = expectedData[i] == actualData[i];
-                        if (!same)
-                        {
-                            break;
-                        }
-                    }
-                    BOOST_CHECK_MESSAGE(same, tensorName + " data does not match");
-                }
-            }
-        }
-    }
-
-private:
-    std::string m_LayerName;
-    std::vector<armnn::TensorInfo> m_InputTensorInfos;
-    std::vector<armnn::TensorInfo> m_OutputTensorInfos;
-};
-
-template<typename Descriptor>
-class LayerVerifierBaseWithDescriptor : public LayerVerifierBase
-{
-public:
-    LayerVerifierBaseWithDescriptor(const std::string& layerName,
-                                    const std::vector<armnn::TensorInfo>& inputInfos,
-                                    const std::vector<armnn::TensorInfo>& outputInfos,
-                                    const Descriptor& descriptor)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos)
-        , m_Descriptor(descriptor) {}
-
-protected:
-    void VerifyDescriptor(const Descriptor& descriptor)
-    {
-        BOOST_CHECK(descriptor == m_Descriptor);
-    }
-
-    Descriptor m_Descriptor;
-};
-
-template<typename T>
-void CompareConstTensorData(const void* data1, const void* data2, unsigned int numElements)
-{
-    T typedData1 = static_cast<T>(data1);
-    T typedData2 = static_cast<T>(data2);
-    BOOST_CHECK(typedData1);
-    BOOST_CHECK(typedData2);
-
-    for (unsigned int i = 0; i < numElements; i++)
-    {
-        BOOST_TEST(typedData1[i] == typedData2[i]);
-    }
-}
-
-void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2)
-{
-    BOOST_TEST(tensor1.GetShape() == tensor2.GetShape());
-    BOOST_TEST(GetDataTypeName(tensor1.GetDataType()) == GetDataTypeName(tensor2.GetDataType()));
-
-    switch (tensor1.GetDataType())
-    {
-        case armnn::DataType::Float32:
-            CompareConstTensorData<const float*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        case armnn::DataType::QAsymmU8:
-        case armnn::DataType::Boolean:
-            CompareConstTensorData<const uint8_t*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        case armnn::DataType::QSymmS8:
-            CompareConstTensorData<const int8_t*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        case armnn::DataType::Signed32:
-            CompareConstTensorData<const int32_t*>(
-                tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
-            break;
-        default:
-            // Note that Float16 is not yet implemented
-            BOOST_TEST_MESSAGE("Unexpected datatype");
-            BOOST_TEST(false);
-    }
-}
-
-armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
-{
-    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
-    return IDeserializer::Create()->CreateNetworkFromBinary(serializerVector);
-}
+BOOST_AUTO_TEST_SUITE(SerializerTests)
-std::string SerializeNetwork(const armnn::INetwork& network)
+BOOST_AUTO_TEST_CASE(SerializeAbs)
 {
-    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
-
-    serializer->Serialize(network);
-
-    std::stringstream stream;
-    serializer->SaveSerializedToStream(stream);
+    const std::string layerName("abs");
+    const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
-    std::string serializerString{stream.str()};
-    return serializerString;
-}
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-template<typename DataType>
-static std::vector<DataType> GenerateRandomData(size_t size)
-{
-    constexpr bool isIntegerType = std::is_integral<DataType>::value;
-    using Distribution =
-        typename std::conditional<isIntegerType,
-                                  std::uniform_int_distribution<DataType>,
-                                  std::uniform_real_distribution<DataType>>::type;
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    armnn::IConnectableLayer* const absLayer = network->AddAbsLayer(layerName.c_str());
+    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-    static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min();
-    static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
+    inputLayer->GetOutputSlot(0).Connect(absLayer->GetInputSlot(0));
+    absLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    static Distribution distribution(lowerLimit, upperLimit);
-    static std::default_random_engine generator;
+    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    absLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-    std::vector<DataType> randomData(size);
-    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
-    return randomData;
+    LayerVerifierBase verifier(layerName, {tensorInfo}, {tensorInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
-} // anonymous namespace
-
-BOOST_AUTO_TEST_SUITE(SerializerTests)
-
 BOOST_AUTO_TEST_CASE(SerializeAddition)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Addition)
-
     const std::string layerName("addition");
     const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
@@ -294,17 +68,16 @@ BOOST_AUTO_TEST_CASE(SerializeAddition)
     inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     additionLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    std::string serializedNetwork = SerializeNetwork(*network);
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(serializedNetwork);
     BOOST_CHECK(deserializedNetwork);
-    AdditionLayerVerifier verifier(layerName, {tensorInfo, tensorInfo}, {tensorInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {tensorInfo, tensorInfo}, {tensorInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeArgMinMax)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(ArgMinMax)
-
     const std::string layerName("argminmax");
     const armnn::TensorInfo inputInfo({1, 2, 3}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 3}, armnn::DataType::Signed32);
@@ -327,54 +100,15 @@ BOOST_AUTO_TEST_CASE(SerializeArgMinMax)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    ArgMinMaxLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ArgMinMaxDescriptor> verifier(layerName,
+                                                                         {inputInfo},
+                                                                         {outputInfo},
+                                                                         descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeBatchNormalization)
 {
-    using Descriptor = armnn::BatchNormalizationDescriptor;
-    class BatchNormalizationLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        BatchNormalizationLayerVerifier(const std::string& layerName,
-                                        const std::vector<armnn::TensorInfo>& inputInfos,
-                                        const std::vector<armnn::TensorInfo>& outputInfos,
-                                        const Descriptor& descriptor,
-                                        const armnn::ConstTensor& mean,
-                                        const armnn::ConstTensor& variance,
-                                        const armnn::ConstTensor& beta,
-                                        const armnn::ConstTensor& gamma)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Mean(mean)
-            , m_Variance(variance)
-            , m_Beta(beta)
-            , m_Gamma(gamma) {}
-
-        void VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
-                                          const Descriptor& descriptor,
-                                          const armnn::ConstTensor& mean,
-                                          const armnn::ConstTensor& variance,
-                                          const armnn::ConstTensor& beta,
-                                          const armnn::ConstTensor& gamma,
-                                          const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            CompareConstTensor(mean, m_Mean);
-            CompareConstTensor(variance, m_Variance);
-            CompareConstTensor(beta, m_Beta);
-            CompareConstTensor(gamma, m_Gamma);
-        }
-
-    private:
-        armnn::ConstTensor m_Mean;
-        armnn::ConstTensor m_Variance;
-        armnn::ConstTensor m_Beta;
-        armnn::ConstTensor m_Gamma;
-    };
-
     const std::string layerName("batchNormalization");
     const armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
@@ -393,15 +127,21 @@ BOOST_AUTO_TEST_CASE(SerializeBatchNormalization)
     std::vector<float> betaData({1.0});
     std::vector<float> gammaData({0.0});
-    armnn::ConstTensor mean(meanInfo, meanData);
-    armnn::ConstTensor variance(varianceInfo, varianceData);
-    armnn::ConstTensor beta(betaInfo, betaData);
-    armnn::ConstTensor gamma(gammaInfo, gammaData);
+    std::vector<armnn::ConstTensor> constants;
+    constants.emplace_back(armnn::ConstTensor(meanInfo, meanData));
+    constants.emplace_back(armnn::ConstTensor(varianceInfo, varianceData));
+    constants.emplace_back(armnn::ConstTensor(betaInfo, betaData));
+    constants.emplace_back(armnn::ConstTensor(gammaInfo, gammaData));
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
     armnn::IConnectableLayer* const batchNormalizationLayer =
-        network->AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma, layerName.c_str());
+        network->AddBatchNormalizationLayer(descriptor,
+                                            constants[0],
+                                            constants[1],
+                                            constants[2],
+                                            constants[3],
+                                            layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
     inputLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
@@ -413,15 +153,13 @@ BOOST_AUTO_TEST_CASE(SerializeBatchNormalization)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    BatchNormalizationLayerVerifier verifier(
-        layerName, {inputInfo}, {outputInfo}, descriptor, mean, variance, beta, gamma);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::BatchNormalizationDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeBatchToSpaceNd)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(BatchToSpaceNd)
-
     const std::string layerName("spaceToBatchNd");
     const armnn::TensorInfo inputInfo({4, 1, 2, 2}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({1, 1, 4, 4}, armnn::DataType::Float32);
@@ -445,14 +183,15 @@ BOOST_AUTO_TEST_CASE(SerializeBatchToSpaceNd)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    BatchToSpaceNdLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::BatchToSpaceNdDescriptor> verifier(layerName,
+                                                                              {inputInfo},
+                                                                              {outputInfo},
+                                                                              desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeComparison)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Comparison)
-
     const std::string layerName("comparison");
     const armnn::TensorShape shape{2, 1, 2, 4};
@@ -479,8 +218,11 @@ BOOST_AUTO_TEST_CASE(SerializeComparison)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ComparisonDescriptor> verifier(layerName,
+                                                                          { inputInfo, inputInfo },
+                                                                          { outputInfo },
+                                                                          descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeConstant)
@@ -491,22 +233,37 @@ BOOST_AUTO_TEST_CASE(SerializeConstant)
         ConstantLayerVerifier(const std::string& layerName,
                               const std::vector<armnn::TensorInfo>& inputInfos,
                               const std::vector<armnn::TensorInfo>& outputInfos,
-                              const armnn::ConstTensor& layerInput)
+                              const std::vector<armnn::ConstTensor>& constants)
             : LayerVerifierBase(layerName, inputInfos, outputInfos)
-            , m_LayerInput(layerInput) {}
+            , m_Constants(constants) {}
-        void VisitConstantLayer(const armnn::IConnectableLayer* layer,
-                                const armnn::ConstTensor& input,
-                                const char* name) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            VerifyNameAndConnections(layer, name);
-            CompareConstTensor(input, m_LayerInput);
-        }
+            armnn::IgnoreUnused(descriptor, id);
+
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
+                case armnn::LayerType::Addition: break;
+                default:
+                {
+                    this->VerifyNameAndConnections(layer, name);
-        void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
+                    for (std::size_t i = 0; i < constants.size(); i++)
+                    {
+                        CompareConstTensor(constants[i], m_Constants[i]);
+                    }
+                }
+            }
+        }
     private:
-        armnn::ConstTensor m_LayerInput;
+        const std::vector<armnn::ConstTensor> m_Constants;
     };
     const std::string layerName("constant");
@@ -532,53 +289,12 @@ BOOST_AUTO_TEST_CASE(SerializeConstant)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    ConstantLayerVerifier verifier(layerName, {}, {info}, constTensor);
-    deserializedNetwork->Accept(verifier);
+    ConstantLayerVerifier verifier(layerName, {}, {info}, {constTensor});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeConvolution2d)
 {
-    using Descriptor = armnn::Convolution2dDescriptor;
-    class Convolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        Convolution2dLayerVerifier(const std::string& layerName,
-                                   const std::vector<armnn::TensorInfo>& inputInfos,
-                                   const std::vector<armnn::TensorInfo>& outputInfos,
-                                   const Descriptor& descriptor,
-                                   const armnn::ConstTensor& weights,
-                                   const armnn::Optional<armnn::ConstTensor>& biases)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Weights(weights)
-            , m_Biases(biases) {}
-
-        void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                     const Descriptor& descriptor,
-                                     const armnn::ConstTensor& weights,
-                                     const armnn::Optional<armnn::ConstTensor>& biases,
-                                     const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor m_Weights;
-        armnn::Optional<armnn::ConstTensor> m_Biases;
-    };
-
     const std::string layerName("convolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
@@ -622,53 +338,14 @@ BOOST_AUTO_TEST_CASE(SerializeConvolution2d)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    Convolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor>& constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution2dDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeConvolution2dWithPerAxisParams)
 {
-    using Descriptor = armnn::Convolution2dDescriptor;
-    class Convolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        Convolution2dLayerVerifier(const std::string& layerName,
-                                   const std::vector<armnn::TensorInfo>& inputInfos,
-                                   const std::vector<armnn::TensorInfo>& outputInfos,
-                                   const Descriptor& descriptor,
-                                   const armnn::ConstTensor& weights,
-                                   const armnn::Optional<armnn::ConstTensor>& biases)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Weights(weights)
-            , m_Biases(biases) {}
-
-        void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                     const Descriptor& descriptor,
-                                     const armnn::ConstTensor& weights,
-                                     const armnn::Optional<armnn::ConstTensor>& biases,
-                                     const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor m_Weights;
-        armnn::Optional<armnn::ConstTensor> m_Biases;
-    };
-
     using namespace armnn;
     const std::string layerName("convolution2dWithPerAxis");
@@ -716,14 +393,14 @@ BOOST_AUTO_TEST_CASE(SerializeConvolution2dWithPerAxisParams)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    Convolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor>& constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution2dDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeDepthToSpace)
 {
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(DepthToSpace)
-
     const std::string layerName("depthToSpace");
     const armnn::TensorInfo inputInfo ({ 1, 8, 4, 12 }, armnn::DataType::Float32);
@@ -747,53 +424,12 @@ BOOST_AUTO_TEST_CASE(SerializeDepthToSpace)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    DepthToSpaceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::DepthToSpaceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2d)
 {
-    using Descriptor = armnn::DepthwiseConvolution2dDescriptor;
-    class DepthwiseConvolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        DepthwiseConvolution2dLayerVerifier(const std::string& layerName,
-                                            const std::vector<armnn::TensorInfo>& inputInfos,
-                                            const std::vector<armnn::TensorInfo>& outputInfos,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& weights,
-                                            const armnn::Optional<armnn::ConstTensor>& biases) :
-            LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor),
-            m_Weights(weights),
-            m_Biases(biases) {}
-
-        void VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                              const Descriptor& descriptor,
-                                              const armnn::ConstTensor& weights,
-                                              const armnn::Optional<armnn::ConstTensor>& biases,
-                                              const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor m_Weights;
-        armnn::Optional<armnn::ConstTensor> m_Biases;
-    };
-
     const std::string layerName("depwiseConvolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
@@ -837,53 +473,14 @@ BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2d)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    DepthwiseConvolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor>& constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::DepthwiseConvolution2dDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2dWithPerAxisParams)
 {
-    using Descriptor = armnn::DepthwiseConvolution2dDescriptor;
-    class DepthwiseConvolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        DepthwiseConvolution2dLayerVerifier(const std::string& layerName,
-                                            const std::vector<armnn::TensorInfo>& inputInfos,
-                                            const std::vector<armnn::TensorInfo>& outputInfos,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& weights,
-                                            const armnn::Optional<armnn::ConstTensor>& biases) :
-            LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor),
-            m_Weights(weights),
-            m_Biases(biases) {}
-
-        void VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
-                                              const Descriptor& descriptor,
-                                              const armnn::ConstTensor& weights,
-                                              const armnn::Optional<armnn::ConstTensor>& biases,
-                                              const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            // check weights
-            CompareConstTensor(weights, m_Weights);
-
-            // check biases
-            BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled);
-            BOOST_CHECK(biases.has_value() == m_Biases.has_value());
-
-            if (biases.has_value() && m_Biases.has_value())
-            {
-                CompareConstTensor(biases.value(), m_Biases.value());
-            }
-        }
-
-    private:
-        armnn::ConstTensor m_Weights;
-        armnn::Optional<armnn::ConstTensor> m_Biases;
-    };
-
     using namespace armnn;
     const std::string layerName("depwiseConvolution2dWithPerAxis");
@@ -933,14 +530,14 @@ BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2dWithPerAxisParams)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    DepthwiseConvolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor>& constants {weights, biases};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::DepthwiseConvolution2dDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeDequantize)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Dequantize)
-
     const std::string layerName("dequantize");
     const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
     const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
@@ -959,39 +556,12 @@ BOOST_AUTO_TEST_CASE(SerializeDequantize)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    DequantizeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeDeserializeDetectionPostProcess)
 {
-    using Descriptor = armnn::DetectionPostProcessDescriptor;
-    class DetectionPostProcessLayerVerifier : public LayerVerifierBaseWithDescriptor<Descriptor>
-    {
-    public:
-        DetectionPostProcessLayerVerifier(const std::string& layerName,
-                                          const std::vector<armnn::TensorInfo>& inputInfos,
-                                          const std::vector<armnn::TensorInfo>& outputInfos,
-                                          const Descriptor& descriptor,
-                                          const armnn::ConstTensor& anchors)
-            : LayerVerifierBaseWithDescriptor<Descriptor>(layerName, inputInfos, outputInfos, descriptor)
-            , m_Anchors(anchors) {}
-
-        void VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
-                                            const Descriptor& descriptor,
-                                            const armnn::ConstTensor& anchors,
-                                            const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-
-            CompareConstTensor(anchors, m_Anchors);
-        }
-
-    private:
-        armnn::ConstTensor m_Anchors;
-    };
-
     const std::string layerName("detectionPostProcess");
     const std::vector<armnn::TensorInfo> inputInfos({
@@ -1051,14 +621,14 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeDetectionPostProcess)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    DetectionPostProcessLayerVerifier verifier(layerName, inputInfos, outputInfos, descriptor, anchors);
-    deserializedNetwork->Accept(verifier);
+    const std::vector<armnn::ConstTensor>& constants {anchors};
+    LayerVerifierBaseWithDescriptorAndConstants<armnn::DetectionPostProcessDescriptor> verifier(
+        layerName, inputInfos, outputInfos, descriptor, constants);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 BOOST_AUTO_TEST_CASE(SerializeDivision)
 {
-    DECLARE_LAYER_VERIFIER_CLASS(Division)
-
     const std::string layerName("division");
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
@@ -1079,131 +649,41 @@ BOOST_AUTO_TEST_CASE(SerializeDivision)
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
-    DivisionLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
-class EqualLayerVerifier : public LayerVerifierBase
-{
-public:
-    EqualLayerVerifier(const std::string& layerName,
-                       const std::vector<armnn::TensorInfo>& inputInfos,
-                       const std::vector<armnn::TensorInfo>& outputInfos)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
-
-    void VisitComparisonLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::ComparisonDescriptor& descriptor,
-                              const char* name) override
-    {
-        VerifyNameAndConnections(layer, name);
-        BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Equal);
-    }
-
-    void VisitEqualLayer(const armnn::IConnectableLayer*, const char*) override
-    {
-        throw armnn::Exception("EqualLayer should have translated to ComparisonLayer");
-    }
-};
-
-// NOTE: Until the deprecated AddEqualLayer disappears this test checks that calling
-// AddEqualLayer places a ComparisonLayer into the serialized format and that
-// when this deserialises we have a ComparisonLayer
-BOOST_AUTO_TEST_CASE(SerializeEqual)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeEqual)
 {
-    const std::string layerName("equal");
-
-    const armnn::TensorShape shape{2, 1, 2, 4};
-
-    const armnn::TensorInfo inputInfo = armnn::TensorInfo(shape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+    const std::string layerName("EqualLayer");
+    const armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
+    const armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Boolean);
     armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(0);
inputLayer2 = network->AddInputLayer(1); ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const equalLayer = network->AddEqualLayer(layerName.c_str()); ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); - inputLayer0->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0)); - inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(1)); + inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0)); + inputLayer1->GetOutputSlot(0).SetTensorInfo(inputTensorInfo1); + inputLayer2->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(1)); + inputLayer2->GetOutputSlot(0).SetTensorInfo(inputTensorInfo2); equalLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); - - inputLayer0->GetOutputSlot(0).SetTensorInfo(inputInfo); - inputLayer1->GetOutputSlot(0).SetTensorInfo(inputInfo); - equalLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + equalLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - EqualLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }); - deserializedNetwork->Accept(verifier); -} - -BOOST_AUTO_TEST_CASE(EnsureEqualBackwardCompatibility) -{ - // The hex data below is a flat buffer containing a simple network with two inputs, - // an EqualLayer (now deprecated) and an output - // - // This test verifies that we can still deserialize this old-style model by replacing - // the EqualLayer with an equivalent ComparisonLayer - const std::vector equalModel = - { - 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00, - 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xCC, 0x01, 0x00, 0x00, 0x20, 0x01, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x60, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xFE, 0xFF, 0xFF, 0x04, 0x00, - 0x00, 0x00, 0x06, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xEA, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x64, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0xFE, 0xFF, 0xFF, 0x00, 0x00, - 0x00, 0x13, 0x04, 0x00, 0x00, 0x00, 0x52, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x36, 0xFF, 0xFF, 0xFF, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1C, 0x00, - 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x71, 0x75, 0x61, 0x6C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x5C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x34, 0xFF, - 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x92, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00, - 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0E, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x66, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x04, 0x00, 0x00, 0x00, 0xF6, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00, - 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00, - 0x07, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00 - }; - - armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(equalModel.begin(), equalModel.end())); - BOOST_CHECK(deserializedNetwork); - - const armnn::TensorShape shape{ 2, 1, 2, 4 }; - - const armnn::TensorInfo inputInfo = armnn::TensorInfo(shape, armnn::DataType::Float32); - const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean); - - EqualLayerVerifier verifier("equal", { inputInfo, inputInfo }, { outputInfo }); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {inputTensorInfo1, inputTensorInfo2}, {outputTensorInfo}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeFill) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Fill) - const std::string layerName("fill"); const armnn::TensorInfo inputInfo({4}, armnn::DataType::Signed32); const armnn::TensorInfo outputInfo({1, 3, 3, 1}, armnn::DataType::Float32); @@ -1224,15 +704,13 @@ BOOST_AUTO_TEST_CASE(SerializeFill) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - FillLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor); + LayerVerifierBaseWithDescriptor verifier(layerName, {inputInfo}, {outputInfo}, descriptor); - deserializedNetwork->Accept(verifier); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeFloor) { - DECLARE_LAYER_VERIFIER_CLASS(Floor) - const std::string layerName("floor"); 
const armnn::TensorInfo info({4,4}, armnn::DataType::Float32); @@ -1250,51 +728,12 @@ BOOST_AUTO_TEST_CASE(SerializeFloor) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - FloorLayerVerifier verifier(layerName, {info}, {info}); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {info}, {info}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeFullyConnected) { - using Descriptor = armnn::FullyConnectedDescriptor; - class FullyConnectedLayerVerifier : public LayerVerifierBaseWithDescriptor - { - public: - FullyConnectedLayerVerifier(const std::string& layerName, - const std::vector& inputInfos, - const std::vector& outputInfos, - const Descriptor& descriptor, - const armnn::ConstTensor& weight, - const armnn::Optional& bias) - : LayerVerifierBaseWithDescriptor(layerName, inputInfos, outputInfos, descriptor) - , m_Weight(weight) - , m_Bias(bias) {} - - void VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer, - const Descriptor& descriptor, - const armnn::ConstTensor& weight, - const armnn::Optional& bias, - const char* name) override - { - VerifyNameAndConnections(layer, name); - VerifyDescriptor(descriptor); - - CompareConstTensor(weight, m_Weight); - - BOOST_TEST(bias.has_value() == descriptor.m_BiasEnabled); - BOOST_TEST(bias.has_value() == m_Bias.has_value()); - - if (bias.has_value() && m_Bias.has_value()) - { - CompareConstTensor(bias.value(), m_Bias.value()); - } - } - - private: - armnn::ConstTensor m_Weight; - armnn::Optional m_Bias; - }; - const std::string layerName("fullyConnected"); const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32); @@ -1328,8 +767,10 @@ BOOST_AUTO_TEST_CASE(SerializeFullyConnected) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - FullyConnectedLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases); - deserializedNetwork->Accept(verifier); + const std::vector constants {weights, biases}; + LayerVerifierBaseWithDescriptorAndConstants verifier( + layerName, {inputInfo}, {outputInfo}, descriptor, constants); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeGather) @@ -1344,17 +785,26 @@ BOOST_AUTO_TEST_CASE(SerializeGather) const GatherDescriptor& descriptor) : LayerVerifierBaseWithDescriptor(layerName, inputInfos, outputInfos, descriptor) {} - void VisitGatherLayer(const armnn::IConnectableLayer* layer, - const GatherDescriptor& descriptor, - const char *name) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const armnn::BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id = 0) override { - VerifyNameAndConnections(layer, name); - BOOST_CHECK(descriptor.m_Axis == m_Descriptor.m_Axis); + armnn::IgnoreUnused(constants, id); + switch (layer->GetType()) + { + case armnn::LayerType::Input: break; + case armnn::LayerType::Output: break; + case armnn::LayerType::Constant: break; + default: + { + VerifyNameAndConnections(layer, name); + const GatherDescriptor& layerDescriptor = static_cast(descriptor); + BOOST_CHECK(layerDescriptor.m_Axis == m_Descriptor.m_Axis); + } + } } - - void VisitConstantLayer(const armnn::IConnectableLayer*, - const armnn::ConstTensor&, - const char*) override {} }; const 
std::string layerName("gather"); @@ -1390,35 +840,14 @@ BOOST_AUTO_TEST_CASE(SerializeGather) BOOST_CHECK(deserializedNetwork); GatherLayerVerifier verifier(layerName, {paramsInfo, indicesInfo}, {outputInfo}, descriptor); - deserializedNetwork->Accept(verifier); + deserializedNetwork->ExecuteStrategy(verifier); } -class GreaterLayerVerifier : public LayerVerifierBase -{ -public: - GreaterLayerVerifier(const std::string& layerName, - const std::vector& inputInfos, - const std::vector& outputInfos) - : LayerVerifierBase(layerName, inputInfos, outputInfos) {} - - void VisitComparisonLayer(const armnn::IConnectableLayer* layer, - const armnn::ComparisonDescriptor& descriptor, - const char* name) override - { - VerifyNameAndConnections(layer, name); - BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Greater); - } - - void VisitGreaterLayer(const armnn::IConnectableLayer*, const char*) override - { - throw armnn::Exception("GreaterLayer should have translated to ComparisonLayer"); - } -}; // NOTE: Until the deprecated AddGreaterLayer disappears this test checks that calling // AddGreaterLayer places a ComparisonLayer into the serialized format and that // when this deserialises we have a ComparisonLayer -BOOST_AUTO_TEST_CASE(SerializeGreater) +BOOST_AUTO_TEST_CASE(SerializeGreaterDeprecated) { const std::string layerName("greater"); @@ -1446,74 +875,13 @@ BOOST_AUTO_TEST_CASE(SerializeGreater) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - GreaterLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, { inputInfo, inputInfo }, { outputInfo }); + deserializedNetwork->ExecuteStrategy(verifier); } -BOOST_AUTO_TEST_CASE(EnsureGreaterBackwardCompatibility) -{ - // The hex data below is a flat buffer containing a simple network with two inputs, - // an GreaterLayer (now deprecated) and an output - // - // This test verifies that we can still deserialize this old-style model by replacing - // the GreaterLayer with an equivalent ComparisonLayer - const std::vector greaterModel = - { - 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00, - 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xCC, 0x01, 0x00, 0x00, 0x20, 0x01, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x60, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xFE, 0xFF, 0xFF, 0x04, 0x00, - 0x00, 0x00, 0x06, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xEA, 0xFE, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x64, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0xFE, 0xFF, 0xFF, 0x00, 0x00, - 0x00, 0x19, 0x04, 0x00, 0x00, 0x00, 0x52, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x36, 0xFF, 0xFF, 0xFF, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1C, 0x00, - 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x67, 0x72, 0x65, 0x61, 0x74, 0x65, 0x72, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x5C, 0x00, 0x00, 0x00, 0x40, 
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x34, 0xFF, - 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x92, 0xFE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00, - 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0E, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x0E, 0x00, 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0E, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x66, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x04, 0x00, 0x00, 0x00, 0xF6, 0xFF, 0xFF, 0xFF, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00, - 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x08, 0x00, - 0x07, 0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00 - }; - - armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(greaterModel.begin(), greaterModel.end())); - BOOST_CHECK(deserializedNetwork); - - const armnn::TensorShape shape{ 1, 2, 2, 2 }; - - const armnn::TensorInfo inputInfo = armnn::TensorInfo(shape, armnn::DataType::Float32); - const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean); - - GreaterLayerVerifier verifier("greater", { inputInfo, inputInfo }, { outputInfo }); - deserializedNetwork->Accept(verifier); -} BOOST_AUTO_TEST_CASE(SerializeInstanceNormalization) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(InstanceNormalization) - const std::string layerName("instanceNormalization"); const armnn::TensorInfo info({ 1, 2, 1, 5 }, armnn::DataType::Float32); @@ -1538,12 +906,11 @@ BOOST_AUTO_TEST_CASE(SerializeInstanceNormalization) armnn::INetworkPtr deserializedNetwork = 
DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    InstanceNormalizationLayerVerifier verifier(layerName, {info}, {info}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::InstanceNormalizationDescriptor> verifier(
+        layerName, {info}, {info}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

-DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(L2Normalization)
-
BOOST_AUTO_TEST_CASE(SerializeL2Normalization)
{
    const std::string l2NormLayerName("l2Normalization");
@@ -1567,8 +934,9 @@ BOOST_AUTO_TEST_CASE(SerializeL2Normalization)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    L2NormalizationLayerVerifier verifier(l2NormLayerName, {info}, {info}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::L2NormalizationDescriptor> verifier(
+        l2NormLayerName, {info}, {info}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(EnsureL2NormalizationBackwardCompatibility)
@@ -1623,14 +991,13 @@ BOOST_AUTO_TEST_CASE(EnsureL2NormalizationBackwardCompatibility)
    // Since this variable does not exist in the l2NormalizationModel dump, the default value will be loaded
    desc.m_Eps = 1e-12f;

-    L2NormalizationLayerVerifier verifier(layerName, {inputInfo}, {inputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::L2NormalizationDescriptor> verifier(
+        layerName, {inputInfo}, {inputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeLogicalBinary)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(LogicalBinary)
-
    const std::string layerName("logicalBinaryAnd");

    const armnn::TensorShape shape{2, 1, 2, 2};
@@ -1657,14 +1024,13 @@ BOOST_AUTO_TEST_CASE(SerializeLogicalBinary)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    LogicalBinaryLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::LogicalBinaryDescriptor> verifier(
+        layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeLogicalUnary)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(ElementwiseUnary)
-
    const std::string layerName("elementwiseUnaryLogicalNot");

    const armnn::TensorShape shape{2, 1, 2, 2};
@@ -1690,15 +1056,14 @@ BOOST_AUTO_TEST_CASE(SerializeLogicalUnary)
    BOOST_CHECK(deserializedNetwork);

-    ElementwiseUnaryLayerVerifier verifier(layerName, { inputInfo }, { outputInfo }, descriptor);
+    LayerVerifierBaseWithDescriptor<armnn::ElementwiseUnaryDescriptor> verifier(
+        layerName, { inputInfo }, { outputInfo }, descriptor);

-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeLogSoftmax)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(LogSoftmax)
-
    const std::string layerName("log_softmax");
    const armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
@@ -1720,14 +1085,12 @@ BOOST_AUTO_TEST_CASE(SerializeLogSoftmax)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    LogSoftmaxLayerVerifier verifier(layerName, {info}, {info}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::LogSoftmaxDescriptor> verifier(layerName, {info}, {info}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}
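// All of the Serialize* cases in this file follow the same round-trip shape;
// a condensed sketch for orientation only (Foo, fooDescriptor and the infos
// are placeholders, not identifiers from this patch):
//
//     armnn::INetworkPtr network = armnn::INetwork::Create();
//     // ...add input layer(s), the Foo layer under test and output layer(s),
//     //    connect the slots and set the tensor infos...
//     armnn::INetworkPtr deserializedNetwork =
//         DeserializeNetwork(SerializeNetwork(*network));
//     LayerVerifierBaseWithDescriptor<armnn::FooDescriptor> verifier(
//         layerName, {inputInfo}, {outputInfo}, fooDescriptor);
//     deserializedNetwork->ExecuteStrategy(verifier);
//
// The verifier's ExecuteStrategy() compares the deserialized layer's name,
// connections and descriptor against the values the network was built with.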
BOOST_AUTO_TEST_CASE(SerializeMaximum)
{
-    DECLARE_LAYER_VERIFIER_CLASS(Maximum)
-
    const std::string layerName("maximum");
    const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
@@ -1748,14 +1111,12 @@ BOOST_AUTO_TEST_CASE(SerializeMaximum)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    MaximumLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeMean)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Mean)
-
    const std::string layerName("mean");
    const armnn::TensorInfo inputInfo({1, 1, 3, 2}, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo({1, 1, 1, 2}, armnn::DataType::Float32);
@@ -1778,14 +1139,12 @@ BOOST_AUTO_TEST_CASE(SerializeMean)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    MeanLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::MeanDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeMerge)
{
-    DECLARE_LAYER_VERIFIER_CLASS(Merge)
-
    const std::string layerName("merge");
    const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
@@ -1806,8 +1165,8 @@ BOOST_AUTO_TEST_CASE(SerializeMerge)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    MergeLayerVerifier verifier(layerName, {info, info}, {info});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {info, info}, {info});
+    deserializedNetwork->ExecuteStrategy(verifier);
}

class MergerLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::OriginsDescriptor>
@@ -1819,19 +1178,35 @@ public:
                        const armnn::OriginsDescriptor& descriptor)
        : LayerVerifierBaseWithDescriptor<armnn::OriginsDescriptor>(layerName, inputInfos, outputInfos, descriptor) {}

-    void VisitMergerLayer(const armnn::IConnectableLayer*,
-                          const armnn::OriginsDescriptor&,
-                          const char*) override
-    {
-        throw armnn::Exception("MergerLayer should have translated to ConcatLayer");
-    }
-
-    void VisitConcatLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::OriginsDescriptor& descriptor,
-                          const char* name) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
    {
-        VerifyNameAndConnections(layer, name);
-        VerifyDescriptor(descriptor);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Merge:
+            {
+                throw armnn::Exception("MergerLayer should have translated to ConcatLayer");
+                break;
+            }
+            case armnn::LayerType::Concat:
+            {
+                VerifyNameAndConnections(layer, name);
+                const armnn::MergerDescriptor& layerDescriptor =
+                        static_cast<const armnn::MergerDescriptor&>(descriptor);
+                VerifyDescriptor(layerDescriptor);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in Merge test model");
+            }
+        }
    }
};

@@ -1870,7 +1245,7 @@ BOOST_AUTO_TEST_CASE(SerializeMerger)
    BOOST_CHECK(deserializedNetwork);

    MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor);
-
deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(EnsureMergerLayerBackwardCompatibility) @@ -1939,7 +1314,7 @@ BOOST_AUTO_TEST_CASE(EnsureMergerLayerBackwardCompatibility) armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 0); MergerLayerVerifier verifier("merger", { inputInfo, inputInfo }, { outputInfo }, descriptor); - deserializedNetwork->Accept(verifier); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeConcat) @@ -1974,13 +1349,11 @@ BOOST_AUTO_TEST_CASE(SerializeConcat) // NOTE: using the MergerLayerVerifier to ensure that it is a concat layer and not a // merger layer that gets placed into the graph. MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor); - deserializedNetwork->Accept(verifier); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeMinimum) { - DECLARE_LAYER_VERIFIER_CLASS(Minimum) - const std::string layerName("minimum"); const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32); @@ -2001,14 +1374,12 @@ BOOST_AUTO_TEST_CASE(SerializeMinimum) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - MinimumLayerVerifier verifier(layerName, {info, info}, {info}); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {info, info}, {info}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeMultiplication) { - DECLARE_LAYER_VERIFIER_CLASS(Multiplication) - const std::string layerName("multiplication"); const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32); @@ -2029,14 +1400,12 @@ BOOST_AUTO_TEST_CASE(SerializeMultiplication) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - MultiplicationLayerVerifier verifier(layerName, {info, info}, {info}); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {info, info}, {info}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializePrelu) { - DECLARE_LAYER_VERIFIER_CLASS(Prelu) - const std::string layerName("prelu"); armnn::TensorInfo inputTensorInfo ({ 4, 1, 2 }, armnn::DataType::Float32); @@ -2060,14 +1429,12 @@ BOOST_AUTO_TEST_CASE(SerializePrelu) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - PreluLayerVerifier verifier(layerName, {inputTensorInfo, alphaTensorInfo}, {outputTensorInfo}); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {inputTensorInfo, alphaTensorInfo}, {outputTensorInfo}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeNormalization) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Normalization) - const std::string layerName("normalization"); const armnn::TensorInfo info({2, 1, 2, 2}, armnn::DataType::Float32); @@ -2092,12 +1459,10 @@ BOOST_AUTO_TEST_CASE(SerializeNormalization) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - NormalizationLayerVerifier verifier(layerName, {info}, {info}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier(layerName, {info}, {info}, desc); + deserializedNetwork->ExecuteStrategy(verifier); } -DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Pad) - BOOST_AUTO_TEST_CASE(SerializePad) { const std::string 
layerName("pad"); @@ -2120,8 +1485,11 @@ BOOST_AUTO_TEST_CASE(SerializePad) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - PadLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier(layerName, + {inputTensorInfo}, + {outputTensorInfo}, + desc); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(EnsurePadBackwardCompatibility) @@ -2174,14 +1542,12 @@ BOOST_AUTO_TEST_CASE(EnsurePadBackwardCompatibility) armnn::PadDescriptor descriptor({{ 0, 0 }, { 1, 0 }, { 1, 1 }, { 1, 2 }}); - PadLayerVerifier verifier("pad", { inputInfo }, { outputInfo }, descriptor); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier("pad", { inputInfo }, { outputInfo }, descriptor); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializePermute) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Permute) - const std::string layerName("permute"); const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32); const armnn::TensorInfo outputTensorInfo({1, 2, 3, 4}, armnn::DataType::Float32); @@ -2202,14 +1568,13 @@ BOOST_AUTO_TEST_CASE(SerializePermute) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - PermuteLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializePooling2d) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Pooling2d) - const std::string layerName("pooling2d"); const armnn::TensorInfo inputInfo({1, 2, 2, 1}, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({1, 1, 1, 1}, armnn::DataType::Float32); @@ -2242,14 +1607,13 @@ BOOST_AUTO_TEST_CASE(SerializePooling2d) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - Pooling2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputInfo}, {outputInfo}, desc); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeQuantize) { - DECLARE_LAYER_VERIFIER_CLASS(Quantize) - const std::string layerName("quantize"); const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32); @@ -2267,14 +1631,12 @@ BOOST_AUTO_TEST_CASE(SerializeQuantize) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - QuantizeLayerVerifier verifier(layerName, {info}, {info}); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {info}, {info}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeRank) { - DECLARE_LAYER_VERIFIER_CLASS(Rank) - const std::string layerName("rank"); const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({1}, armnn::DataType::Signed32); @@ -2293,14 +1655,12 @@ BOOST_AUTO_TEST_CASE(SerializeRank) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - RankLayerVerifier 
verifier(layerName, {inputInfo}, {outputInfo});
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeReduceSum)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Reduce)
-
    const std::string layerName("Reduce_Sum");
    const armnn::TensorInfo inputInfo({1, 1, 3, 2}, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo({1, 1, 1, 2}, armnn::DataType::Float32);
@@ -2323,14 +1683,12 @@ BOOST_AUTO_TEST_CASE(SerializeReduceSum)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    ReduceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ReduceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeReshape)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Reshape)
-
    const std::string layerName("reshape");
    const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo({3, 3}, armnn::DataType::Float32);
@@ -2351,14 +1709,13 @@ BOOST_AUTO_TEST_CASE(SerializeReshape)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    ReshapeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ReshapeDescriptor> verifier(
+        layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeResize)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Resize)
-
    const std::string layerName("resize");
    const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
@@ -2384,8 +1741,8 @@ BOOST_AUTO_TEST_CASE(SerializeResize)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    ResizeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
}
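// The verifiers in this file now implement the unified IStrategy interface
// added by this patch (include/armnn/IStrategy.hpp): a single
// ExecuteStrategy() callback receives every layer together with its
// descriptor, constant tensors, name and binding id, replacing the per-layer
// Visit*Layer() methods of ILayerVisitor. As a rough, illustrative sketch
// only (LayerCountingStrategy is a hypothetical name, not part of this
// patch; it would additionally need <map>):
//
//     class LayerCountingStrategy : public armnn::IStrategy
//     {
//     public:
//         void ExecuteStrategy(const armnn::IConnectableLayer* layer,
//                              const armnn::BaseDescriptor& descriptor,
//                              const std::vector<armnn::ConstTensor>& constants,
//                              const char* name,
//                              const armnn::LayerBindingId id = 0) override
//         {
//             armnn::IgnoreUnused(descriptor, name, id);
//             m_ConstantCount += constants.size();   // weights/biases arrive here
//             ++m_CountsByType[layer->GetType()];    // dispatch on armnn::LayerType
//         }
//
//         std::map<armnn::LayerType, size_t> m_CountsByType;
//         size_t m_ConstantCount = 0;
//     };
//
// Applying it with network->ExecuteStrategy(strategy) walks each layer
// exactly once, replacing the old network->Accept(visitor) entry point.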
class ResizeBilinearLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor>
@@ -2398,25 +1755,36 @@ public:
    ResizeBilinearLayerVerifier(const std::string& layerName,
                                const std::vector<armnn::TensorInfo>& inputInfos,
                                const std::vector<armnn::TensorInfo>& outputInfos,
                                const armnn::ResizeDescriptor& descriptor)
        : LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor>(
            layerName, inputInfos, outputInfos, descriptor) {}

-    void VisitResizeLayer(const armnn::IConnectableLayer* layer,
-                          const armnn::ResizeDescriptor& descriptor,
-                          const char* name) override
-    {
-        VerifyNameAndConnections(layer, name);
-
-        BOOST_CHECK(descriptor.m_Method == armnn::ResizeMethod::Bilinear);
-        BOOST_CHECK(descriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
-        BOOST_CHECK(descriptor.m_TargetHeight == m_Descriptor.m_TargetHeight);
-        BOOST_CHECK(descriptor.m_DataLayout == m_Descriptor.m_DataLayout);
-        BOOST_CHECK(descriptor.m_AlignCorners == m_Descriptor.m_AlignCorners);
-        BOOST_CHECK(descriptor.m_HalfPixelCenters == m_Descriptor.m_HalfPixelCenters);
-    }
-
-    void VisitResizeBilinearLayer(const armnn::IConnectableLayer*,
-                                  const armnn::ResizeBilinearDescriptor&,
-                                  const char*) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
    {
-        throw armnn::Exception("ResizeBilinearLayer should have translated to ResizeLayer");
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input: break;
+            case armnn::LayerType::Output: break;
+            case armnn::LayerType::Resize:
+            {
+                VerifyNameAndConnections(layer, name);
+                const armnn::ResizeDescriptor& layerDescriptor =
+                        static_cast<const armnn::ResizeDescriptor&>(descriptor);
+                BOOST_CHECK(layerDescriptor.m_Method == armnn::ResizeMethod::Bilinear);
+                BOOST_CHECK(layerDescriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
+                BOOST_CHECK(layerDescriptor.m_TargetHeight == m_Descriptor.m_TargetHeight);
+                BOOST_CHECK(layerDescriptor.m_DataLayout == m_Descriptor.m_DataLayout);
+                BOOST_CHECK(layerDescriptor.m_AlignCorners == m_Descriptor.m_AlignCorners);
+                BOOST_CHECK(layerDescriptor.m_HalfPixelCenters == m_Descriptor.m_HalfPixelCenters);
+                break;
+            }
+            default:
+            {
+                throw armnn::Exception("Unexpected layer type in test model. ResizeBilinear "
+                                       "should have translated to Resize");
+            }
+        }
    }
};

@@ -2452,7 +1820,7 @@ BOOST_AUTO_TEST_CASE(SerializeResizeBilinear)
    BOOST_CHECK(deserializedNetwork);

    ResizeBilinearLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(EnsureResizeBilinearBackwardCompatibility)
@@ -2508,13 +1876,11 @@ BOOST_AUTO_TEST_CASE(EnsureResizeBilinearBackwardCompatibility)
    descriptor.m_TargetHeight = 2u;

    ResizeBilinearLayerVerifier verifier("resizeBilinear", { inputInfo }, { outputInfo }, descriptor);
-    deserializedNetwork->Accept(verifier);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeSlice)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Slice)
-
    const std::string layerName{"slice"};

    const armnn::TensorInfo inputInfo  = armnn::TensorInfo({3, 2, 3, 1}, armnn::DataType::Float32);
@@ -2537,14 +1903,12 @@ BOOST_AUTO_TEST_CASE(SerializeSlice)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    SliceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::SliceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeSoftmax)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Softmax)
-
    const std::string layerName("softmax");
    const armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
@@ -2565,14 +1929,12 @@ BOOST_AUTO_TEST_CASE(SerializeSoftmax)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
    BOOST_CHECK(deserializedNetwork);
-    SoftmaxLayerVerifier verifier(layerName, {info}, {info}, descriptor);
-    deserializedNetwork->Accept(verifier);
+    LayerVerifierBaseWithDescriptor<armnn::SoftmaxDescriptor> verifier(layerName, {info}, {info}, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
}

BOOST_AUTO_TEST_CASE(SerializeSpaceToBatchNd)
{
-    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(SpaceToBatchNd)
-
    const std::string layerName("spaceToBatchNd");
    const armnn::TensorInfo inputInfo({2, 1, 2, 4}, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo({8, 1, 1, 3}, armnn::DataType::Float32);
@@ -2596,14 +1958,13 @@ BOOST_AUTO_TEST_CASE(SerializeSpaceToBatchNd)
    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
BOOST_CHECK(deserializedNetwork); - SpaceToBatchNdLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputInfo}, {outputInfo}, desc); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeSpaceToDepth) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(SpaceToDepth) - const std::string layerName("spaceToDepth"); const armnn::TensorInfo inputInfo ({ 1, 16, 8, 3 }, armnn::DataType::Float32); @@ -2627,14 +1988,13 @@ BOOST_AUTO_TEST_CASE(SerializeSpaceToDepth) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - SpaceToDepthLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputInfo}, {outputInfo}, desc); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeSplitter) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Splitter) - const unsigned int numViews = 3; const unsigned int numDimensions = 4; const unsigned int inputShape[] = {1, 18, 4, 4}; @@ -2682,14 +2042,13 @@ BOOST_AUTO_TEST_CASE(SerializeSplitter) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - SplitterLayerVerifier verifier(layerName, {inputInfo}, {outputInfo, outputInfo, outputInfo}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputInfo}, {outputInfo, outputInfo, outputInfo}, desc); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeStack) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Stack) - const std::string layerName("stack"); armnn::TensorInfo inputTensorInfo ({4, 3, 5}, armnn::DataType::Float32); @@ -2714,14 +2073,13 @@ BOOST_AUTO_TEST_CASE(SerializeStack) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - StackLayerVerifier verifier(layerName, {inputTensorInfo, inputTensorInfo}, {outputTensorInfo}, descriptor); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputTensorInfo, inputTensorInfo}, {outputTensorInfo}, descriptor); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeStandIn) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(StandIn) - const std::string layerName("standIn"); armnn::TensorInfo tensorInfo({ 1u }, armnn::DataType::Float32); @@ -2749,14 +2107,13 @@ BOOST_AUTO_TEST_CASE(SerializeStandIn) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - StandInLayerVerifier verifier(layerName, { tensorInfo, tensorInfo }, { tensorInfo, tensorInfo }, descriptor); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, { tensorInfo, tensorInfo }, { tensorInfo, tensorInfo }, descriptor); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeStridedSlice) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(StridedSlice) - const std::string layerName("stridedSlice"); const armnn::TensorInfo inputInfo = armnn::TensorInfo({3, 2, 3, 1}, armnn::DataType::Float32); const armnn::TensorInfo outputInfo = armnn::TensorInfo({3, 1}, armnn::DataType::Float32); @@ -2780,14 +2137,13 @@ 
BOOST_AUTO_TEST_CASE(SerializeStridedSlice) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - StridedSliceLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputInfo}, {outputInfo}, desc); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeSubtraction) { - DECLARE_LAYER_VERIFIER_CLASS(Subtraction) - const std::string layerName("subtraction"); const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32); @@ -2808,8 +2164,8 @@ BOOST_AUTO_TEST_CASE(SerializeSubtraction) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - SubtractionLayerVerifier verifier(layerName, {info, info}, {info}); - deserializedNetwork->Accept(verifier); + LayerVerifierBase verifier(layerName, {info, info}, {info}); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeSwitch) @@ -2820,16 +2176,31 @@ BOOST_AUTO_TEST_CASE(SerializeSwitch) SwitchLayerVerifier(const std::string& layerName, const std::vector& inputInfos, const std::vector& outputInfos) - : LayerVerifierBase(layerName, inputInfos, outputInfos) {} + : LayerVerifierBase(layerName, inputInfos, outputInfos) {} - void VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const armnn::BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id = 0) override { - VerifyNameAndConnections(layer, name); + armnn::IgnoreUnused(descriptor, constants, id); + switch (layer->GetType()) + { + case armnn::LayerType::Input: break; + case armnn::LayerType::Output: break; + case armnn::LayerType::Constant: break; + case armnn::LayerType::Switch: + { + VerifyNameAndConnections(layer, name); + break; + } + default: + { + throw armnn::Exception("Unexpected layer type in Switch test model"); + } + } } - - void VisitConstantLayer(const armnn::IConnectableLayer*, - const armnn::ConstTensor&, - const char*) override {} }; const std::string layerName("switch"); @@ -2859,13 +2230,11 @@ BOOST_AUTO_TEST_CASE(SerializeSwitch) BOOST_CHECK(deserializedNetwork); SwitchLayerVerifier verifier(layerName, {info, info}, {info, info}); - deserializedNetwork->Accept(verifier); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeTranspose) { - DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Transpose) - const std::string layerName("transpose"); const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32); const armnn::TensorInfo outputTensorInfo({1, 2, 3, 4}, armnn::DataType::Float32); @@ -2886,54 +2255,13 @@ BOOST_AUTO_TEST_CASE(SerializeTranspose) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - TransposeLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor); - deserializedNetwork->Accept(verifier); + LayerVerifierBaseWithDescriptor verifier( + layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeTransposeConvolution2d) { - using Descriptor = armnn::TransposeConvolution2dDescriptor; - class TransposeConvolution2dLayerVerifier : public LayerVerifierBaseWithDescriptor - { - public: - 
TransposeConvolution2dLayerVerifier(const std::string& layerName, - const std::vector& inputInfos, - const std::vector& outputInfos, - const Descriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases) - : LayerVerifierBaseWithDescriptor(layerName, inputInfos, outputInfos, descriptor) - , m_Weights(weights) - , m_Biases(biases) - {} - - void VisitTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer, - const Descriptor& descriptor, - const armnn::ConstTensor& weights, - const armnn::Optional& biases, - const char* name) override - { - VerifyNameAndConnections(layer, name); - VerifyDescriptor(descriptor); - - // check weights - CompareConstTensor(weights, m_Weights); - - // check biases - BOOST_CHECK(biases.has_value() == descriptor.m_BiasEnabled); - BOOST_CHECK(biases.has_value() == m_Biases.has_value()); - - if (biases.has_value() && m_Biases.has_value()) - { - CompareConstTensor(biases.value(), m_Biases.value()); - } - } - - private: - armnn::ConstTensor m_Weights; - armnn::Optional m_Biases; - }; - const std::string layerName("transposeConvolution2d"); const armnn::TensorInfo inputInfo ({ 1, 7, 7, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 9, 9, 1 }, armnn::DataType::Float32); @@ -2975,8 +2303,10 @@ BOOST_AUTO_TEST_CASE(SerializeTransposeConvolution2d) armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); BOOST_CHECK(deserializedNetwork); - TransposeConvolution2dLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, descriptor, weights, biases); - deserializedNetwork->Accept(verifier); + const std::vector constants {weights, biases}; + LayerVerifierBaseWithDescriptorAndConstants verifier( + layerName, {inputInfo}, {outputInfo}, descriptor, constants); + deserializedNetwork->ExecuteStrategy(verifier); } BOOST_AUTO_TEST_CASE(SerializeDeserializeNonLinearNetwork) @@ -2991,16 +2321,31 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeNonLinearNetwork) : LayerVerifierBase(layerName, inputInfos, outputInfos) , m_LayerInput(layerInput) {} - void VisitConstantLayer(const armnn::IConnectableLayer* layer, - const armnn::ConstTensor& input, - const char* name) override + void ExecuteStrategy(const armnn::IConnectableLayer* layer, + const armnn::BaseDescriptor& descriptor, + const std::vector& constants, + const char* name, + const armnn::LayerBindingId id = 0) override { - VerifyNameAndConnections(layer, name); - CompareConstTensor(input, m_LayerInput); + armnn::IgnoreUnused(descriptor, constants, id); + switch (layer->GetType()) + { + case armnn::LayerType::Input: break; + case armnn::LayerType::Output: break; + case armnn::LayerType::Addition: break; + case armnn::LayerType::Constant: + { + VerifyNameAndConnections(layer, name); + CompareConstTensor(constants.at(0), m_LayerInput); + break; + } + default: + { + throw armnn::Exception("Unexpected layer type in test model"); + } + } } - void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {} - private: armnn::ConstTensor m_LayerInput; }; @@ -3029,2125 +2374,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeNonLinearNetwork) BOOST_CHECK(deserializedNetwork); ConstantLayerVerifier verifier(layerName, {}, {info}, constTensor); - deserializedNetwork->Accept(verifier); -} - -class VerifyLstmLayer : public LayerVerifierBaseWithDescriptor -{ -public: - VerifyLstmLayer(const std::string& layerName, - const std::vector& inputInfos, - const std::vector& outputInfos, - const armnn::LstmDescriptor& descriptor, - const 
armnn::LstmInputParams& inputParams) - : LayerVerifierBaseWithDescriptor(layerName, inputInfos, outputInfos, descriptor) - , m_InputParams(inputParams) {} - - void VisitLstmLayer(const armnn::IConnectableLayer* layer, - const armnn::LstmDescriptor& descriptor, - const armnn::LstmInputParams& params, - const char* name) - { - VerifyNameAndConnections(layer, name); - VerifyDescriptor(descriptor); - VerifyInputParameters(params); - } - -protected: - void VerifyInputParameters(const armnn::LstmInputParams& params) - { - VerifyConstTensors( - "m_InputToInputWeights", m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights); - VerifyConstTensors( - "m_InputToForgetWeights", m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights); - VerifyConstTensors( - "m_InputToCellWeights", m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights); - VerifyConstTensors( - "m_InputToOutputWeights", m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights); - VerifyConstTensors( - "m_RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights); - VerifyConstTensors( - "m_RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights); - VerifyConstTensors( - "m_RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights); - VerifyConstTensors( - "m_RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights); - VerifyConstTensors( - "m_CellToInputWeights", m_InputParams.m_CellToInputWeights, params.m_CellToInputWeights); - VerifyConstTensors( - "m_CellToForgetWeights", m_InputParams.m_CellToForgetWeights, params.m_CellToForgetWeights); - VerifyConstTensors( - "m_CellToOutputWeights", m_InputParams.m_CellToOutputWeights, params.m_CellToOutputWeights); - VerifyConstTensors( - "m_InputGateBias", m_InputParams.m_InputGateBias, params.m_InputGateBias); - VerifyConstTensors( - "m_ForgetGateBias", m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias); - VerifyConstTensors( - "m_CellBias", m_InputParams.m_CellBias, params.m_CellBias); - VerifyConstTensors( - "m_OutputGateBias", m_InputParams.m_OutputGateBias, params.m_OutputGateBias); - VerifyConstTensors( - "m_ProjectionWeights", m_InputParams.m_ProjectionWeights, params.m_ProjectionWeights); - VerifyConstTensors( - "m_ProjectionBias", m_InputParams.m_ProjectionBias, params.m_ProjectionBias); - VerifyConstTensors( - "m_InputLayerNormWeights", m_InputParams.m_InputLayerNormWeights, params.m_InputLayerNormWeights); - VerifyConstTensors( - "m_ForgetLayerNormWeights", m_InputParams.m_ForgetLayerNormWeights, params.m_ForgetLayerNormWeights); - VerifyConstTensors( - "m_CellLayerNormWeights", m_InputParams.m_CellLayerNormWeights, params.m_CellLayerNormWeights); - VerifyConstTensors( - "m_OutputLayerNormWeights", m_InputParams.m_OutputLayerNormWeights, params.m_OutputLayerNormWeights); - } - -private: - armnn::LstmInputParams m_InputParams; -}; - -BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmCifgPeepholeNoProjection) -{ - armnn::LstmDescriptor descriptor; - descriptor.m_ActivationFunc = 4; - descriptor.m_ClippingThresProj = 0.0f; - descriptor.m_ClippingThresCell = 0.0f; - descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams - descriptor.m_ProjectionEnabled = false; - descriptor.m_PeepholeEnabled = true; - - const uint32_t batchSize = 1; - const uint32_t inputSize = 2; - const uint32_t numUnits = 4; - const uint32_t outputSize = 
numUnits; - - armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32); - std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); - armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData); - - std::vector inputToCellWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); - armnn::ConstTensor inputToCellWeights(inputWeightsInfo1, inputToCellWeightsData); - - std::vector inputToOutputWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); - armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData); - - armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32); - std::vector recurrentToForgetWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); - armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData); - - std::vector recurrentToCellWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); - armnn::ConstTensor recurrentToCellWeights(inputWeightsInfo2, recurrentToCellWeightsData); - - std::vector recurrentToOutputWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); - armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData); - - armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32); - std::vector cellToForgetWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); - armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData); - - std::vector cellToOutputWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); - armnn::ConstTensor cellToOutputWeights(inputWeightsInfo3, cellToOutputWeightsData); - - std::vector forgetGateBiasData(numUnits, 1.0f); - armnn::ConstTensor forgetGateBias(inputWeightsInfo3, forgetGateBiasData); - - std::vector cellBiasData(numUnits, 0.0f); - armnn::ConstTensor cellBias(inputWeightsInfo3, cellBiasData); - - std::vector outputGateBiasData(numUnits, 0.0f); - armnn::ConstTensor outputGateBias(inputWeightsInfo3, outputGateBiasData); - - armnn::LstmInputParams params; - params.m_InputToForgetWeights = &inputToForgetWeights; - params.m_InputToCellWeights = &inputToCellWeights; - params.m_InputToOutputWeights = &inputToOutputWeights; - params.m_RecurrentToForgetWeights = &recurrentToForgetWeights; - params.m_RecurrentToCellWeights = &recurrentToCellWeights; - params.m_RecurrentToOutputWeights = &recurrentToOutputWeights; - params.m_ForgetGateBias = &forgetGateBias; - params.m_CellBias = &cellBias; - params.m_OutputGateBias = &outputGateBias; - params.m_CellToForgetWeights = &cellToForgetWeights; - params.m_CellToOutputWeights = &cellToOutputWeights; - - armnn::INetworkPtr network = armnn::INetwork::Create(); - armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0); - armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1); - armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2); - const std::string layerName("lstm"); - armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str()); - armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0); - armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1); - armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2); - armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3); - - // connect up - armnn::TensorInfo 
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 3 }, armnn::DataType::Float32);
-
-    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
-
-    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyLstmLayer checker(
-        layerName,
-        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-        descriptor,
-        params);
-    deserializedNetwork->Accept(checker);
-}
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeAndProjection)
-{
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled = false; // if this is true then we DON'T need to set the OptCifgParams
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled = true;
-
-    const uint32_t batchSize = 2;
-    const uint32_t inputSize = 5;
-    const uint32_t numUnits = 20;
-    const uint32_t outputSize = 16;
-
-    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
-
-    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
-    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
-
-    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
-
-    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
-
-    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
-
-    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
-
-    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
-
-    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
-
-    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
-    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
-    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
-
-    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
-    std::vector<float> projectionBiasData(outputSize, 0.f);
-    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // additional params because: descriptor.m_CifgEnabled = false
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_CellToInputWeights = &cellToInputWeights;
-    params.m_InputGateBias = &inputGateBias;
-
-    // additional params because: descriptor.m_ProjectionEnabled = true
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias = &projectionBias;
-
-    // additional params because: descriptor.m_PeepholeEnabled = true
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("lstm");
-    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
-    armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3);
-
-    // connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
-
-    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
-
-    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyLstmLayer checker(
-        layerName,
-        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-        descriptor,
-        params);
-    deserializedNetwork->Accept(checker);
-}
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm)
-{
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled = false; // if this is true then we DON'T need to set the OptCifgParams
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled = true;
-    descriptor.m_LayerNormEnabled = true;
-
-    const uint32_t batchSize = 2;
-    const uint32_t inputSize = 5;
-    const uint32_t numUnits = 20;
-    const uint32_t outputSize = 16;
-
-    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
-
-    std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
-    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
-    std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
-
-    std::vector<float> forgetGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
-
-    std::vector<float> cellBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
-
-    std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
-
-    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
-
-    std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
-    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
-
-    std::vector<float> cellToInputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
-
-    std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
-    std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
-    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
-
-    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
-    std::vector<float> projectionBiasData(outputSize, 0.f);
-    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
-
-    std::vector<float> inputLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor inputLayerNormWeights(tensorInfo20, inputLayerNormWeightsData);
-
-    std::vector<float> forgetLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor forgetLayerNormWeights(tensorInfo20, forgetLayerNormWeightsData);
-
-    std::vector<float> cellLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor cellLayerNormWeights(tensorInfo20, cellLayerNormWeightsData);
-
-    std::vector<float> outLayerNormWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
-    armnn::ConstTensor outLayerNormWeights(tensorInfo20, outLayerNormWeightsData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // additional params because: descriptor.m_CifgEnabled = false
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_CellToInputWeights = &cellToInputWeights;
-    params.m_InputGateBias = &inputGateBias;
-
-    // additional params because: descriptor.m_ProjectionEnabled = true
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias = &projectionBias;
-
-    // additional params because: descriptor.m_PeepholeEnabled = true
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    // additional params because: descriptor.m_LayerNormEnabled = true
-    params.m_InputLayerNormWeights = &inputLayerNormWeights;
-    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
-    params.m_CellLayerNormWeights = &cellLayerNormWeights;
-    params.m_OutputLayerNormWeights = &outLayerNormWeights;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("lstm");
-    armnn::IConnectableLayer* const lstmLayer = network->AddLstmLayer(descriptor, params, layerName.c_str());
-    armnn::IConnectableLayer* const scratchBuffer = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(2);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(3);
-
-    // connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
-
-    inputLayer->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(lstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(0).Connect(scratchBuffer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(0).SetTensorInfo(lstmTensorInfoScratchBuff);
-
-    lstmLayer->GetOutputSlot(1).Connect(outputStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(2).Connect(cellStateOut->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(2).SetTensorInfo(cellStateTensorInfo);
-
-    lstmLayer->GetOutputSlot(3).Connect(outputLayer->GetInputSlot(0));
-    lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyLstmLayer checker(
-        layerName,
-        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-        descriptor,
-        params);
-    deserializedNetwork->Accept(checker);
-}
-
-BOOST_AUTO_TEST_CASE(EnsureLstmLayersBackwardCompatibility)
-{
-    // The hex data below is a flat buffer containing an LSTM layer with no Cifg, with peephole and projection
-    // enabled. The data was obtained before the additional layer normalization parameters were added to the
-    // LSTM serializer, so it checks that an LSTM model with the old parameter configuration can
-    // still be loaded.
-    const std::vector<uint8_t> lstmNoCifgWithPeepholeAndProjectionModel =
-    {
-        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x10, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x0A, 0x00,
-        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-        0xDC, 0x29, 0x00, 0x00, 0x38, 0x29, 0x00, 0x00, 0xB4, 0x28, 0x00, 0x00, 0x94, 0x01, 0x00, 0x00, 0x3C, 0x01,
-        0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00,
-        0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x70, 0xD6, 0xFF, 0xFF,
-        0x00, 0x00, 0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x06, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x88, 0xD7,
-        0xFF, 0xFF, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF6, 0xD6, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00,
-        0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0xE8, 0xD7, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xC8, 0xD6, 0xFF, 0xFF, 0x00, 0x00,
-        0x00, 0x0B, 0x04, 0x00, 0x00, 0x00, 0x5E, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xE0, 0xD7, 0xFF, 0xFF,
-        0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x4E, 0xD7, 0xFF, 0xFF, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xD8,
-        0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B,
-        0x04, 0x00, 0x00, 0x00, 0xB6, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x38, 0xD8, 0xFF, 0xFF, 0x08, 0x00,
-        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xA6, 0xD7, 0xFF, 0xFF, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-        0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xD8, 0xFF, 0xFF,
-        0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x78, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0B, 0x04, 0x00,
-        0x00, 0x00, 0x0E, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x16, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00,
-        0xFA, 0xD7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00,
-        0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-        0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xD8, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x6C, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x23, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00,
-        0x12, 0x00, 0x04, 0x00, 0x08,
0x00, 0x0C, 0x00, 0x0A, 0x00, 0x00, 0x00, 0xE0, 0x25, 0x00, 0x00, 0xD0, 0x25, - 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x00, 0x48, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0C, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x18, 0x00, 0x1C, 0x00, 0x20, 0x00, 0x24, 0x00, 0x28, 0x00, 0x2C, 0x00, 0x30, 0x00, - 0x34, 0x00, 0x38, 0x00, 0x3C, 0x00, 0x40, 0x00, 0x44, 0x00, 0x26, 0x00, 0x00, 0x00, 0xC4, 0x23, 0x00, 0x00, - 0xF8, 0x21, 0x00, 0x00, 0x2C, 0x20, 0x00, 0x00, 0xF0, 0x1A, 0x00, 0x00, 0xB4, 0x15, 0x00, 0x00, 0x78, 0x10, - 0x00, 0x00, 0xF0, 0x0F, 0x00, 0x00, 0x68, 0x0F, 0x00, 0x00, 0xE0, 0x0E, 0x00, 0x00, 0x14, 0x0D, 0x00, 0x00, - 0xD8, 0x07, 0x00, 0x00, 0x50, 0x07, 0x00, 0x00, 0xC8, 0x06, 0x00, 0x00, 0x8C, 0x01, 0x00, 0x00, 0x14, 0x01, - 0x00, 0x00, 0x8C, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xEE, 0xD7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, - 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFE, 0xD8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x72, 0xD8, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x82, 0xD9, 0xFF, 0xFF, - 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xD8, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0xF6, 0xD8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x54, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x06, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0xD9, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6A, 0xD9, 0xFF, 0xFF, 0x00, 0x00, - 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7A, 0xDA, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDE, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xA2, 0xDE, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xB2, 0xDF, 0xFF, 0xFF, - 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xDF, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x26, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x36, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x92, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xAA, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, - 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xBA, 0xE0, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xC6, 0xE4, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xE2, 0xE4, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x03, 0xA4, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xF2, 0xE5, 0xFF, 0xFF, 0x04, 0x00, - 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, - 0x00, 0x00, 0xAA, 0xE6, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xBA, 0xE7, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x16, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x2E, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3E, 0xE8, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xB2, 0xE7, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x03, 0x64, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xC2, 0xE8, 0xFF, 0xFF, 0x04, 0x00, - 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0xE8, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, - 0x00, 0x00, 0x36, 0xE8, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x14, 0x05, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x46, 0xE9, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
-        0x00, 0x00, 0x00, 0x00,
-        /* ... several thousand further bytes of the serialized
-           lstmNoCifgWithPeepholeAndProjectionModel blob elided for readability:
-           zero-filled weight/bias payloads interleaved with FlatBuffers offset
-           tables, ending in the serialized "lstm" layer name and the
-           layer/slot metadata ... */
-        0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00
-    };
-
-    armnn::INetworkPtr deserializedNetwork =
-        DeserializeNetwork(std::string(lstmNoCifgWithPeepholeAndProjectionModel.begin(),
-                                       lstmNoCifgWithPeepholeAndProjectionModel.end()));
-
-    BOOST_CHECK(deserializedNetwork);
-
-    // generating the same model parameters which were used to serialize the model (Layer norm is not specified)
-    armnn::LstmDescriptor descriptor;
-    descriptor.m_ActivationFunc = 4;
-    descriptor.m_ClippingThresProj = 0.0f;
-    descriptor.m_ClippingThresCell = 0.0f;
-    descriptor.m_CifgEnabled = false;
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled = true;
-
-    const uint32_t batchSize = 2u;
-    const uint32_t inputSize = 5u;
-    const uint32_t numUnits = 20u;
-    const uint32_t outputSize = 16u;
-
-    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
-    std::vector<float> inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
-
-    std::vector<float> inputToForgetWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToForgetWeights(tensorInfo20x5, inputToForgetWeightsData);
-
-    std::vector<float> inputToCellWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToCellWeights(tensorInfo20x5, inputToCellWeightsData);
-
-    std::vector<float> inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
-    std::vector<float> inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
-
-    std::vector<float> forgetGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor forgetGateBias(tensorInfo20, forgetGateBiasData);
-
-    std::vector<float> cellBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellBias(tensorInfo20, cellBiasData);
-
-    std::vector<float> outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
-
-    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
-    std::vector<float> recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
-
-    std::vector<float> recurrentToForgetWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToForgetWeights(tensorInfo20x16, recurrentToForgetWeightsData);
-
-    std::vector<float> recurrentToCellWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToCellWeights(tensorInfo20x16, recurrentToCellWeightsData);
-
-    std::vector<float> recurrentToOutputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
-    armnn::ConstTensor recurrentToOutputWeights(tensorInfo20x16, recurrentToOutputWeightsData);
-
-    std::vector<float> cellToInputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellToInputWeights(tensorInfo20, cellToInputWeightsData);
-
-    std::vector<float> cellToForgetWeightsData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellToForgetWeights(tensorInfo20, cellToForgetWeightsData);
-
-    std::vector<float> cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
-    armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
-
-    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
-    std::vector<float> projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f);
-    armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
-
-    armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
-    std::vector<float> projectionBiasData(outputSize, 0.0f);
-    armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
-
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // additional params because: descriptor.m_CifgEnabled = false
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_CellToInputWeights = &cellToInputWeights;
-    params.m_InputGateBias = &inputGateBias;
-
-    // additional params because: descriptor.m_ProjectionEnabled = true
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias = &projectionBias;
-
-    // additional params because: descriptor.m_PeepholeEnabled = true
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    const std::string layerName("lstm");
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits }, armnn::DataType::Float32);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
-    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
-
-    VerifyLstmLayer checker(
-        layerName,
-        {inputTensorInfo, outputStateTensorInfo, cellStateTensorInfo},
-        {lstmTensorInfoScratchBuff, outputStateTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-        descriptor,
-        params);
-    deserializedNetwork->Accept(checker);
-}
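The visitor-style checkers deleted above and below are the shape of API this change retires: one Visit<Layer>Layer virtual per layer type, driven by INetwork::Accept. The unified replacement funnels every layer through a single ExecuteStrategy call. Below is a minimal sketch of an equivalent strategy-based verifier; it assumes the ExecuteStrategy signature declared in the new include/armnn/IStrategy.hpp, the class name is ours, and the comparison body is only indicated:

    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    // Strategy-based analogue of the deleted VerifyLstmLayer visitor: one
    // entry point dispatches on LayerType instead of one virtual per layer.
    class LstmVerifierSketch : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id) override
        {
            armnn::IgnoreUnused(id);
            if (layer->GetType() != armnn::LayerType::Lstm)
            {
                return; // this sketch only cares about the Lstm layer
            }
            // 'descriptor' can be downcast to armnn::LstmDescriptor and
            // 'constants' carries the layer's weights/biases; compare both
            // (and 'name') against the expected values, as the visitor's
            // VisitLstmLayer override did.
            armnn::IgnoreUnused(descriptor, constants, name);
        }
    };

A test then calls deserializedNetwork->ExecuteStrategy(verifier) instead of deserializedNetwork->Accept(checker), as the final hunk of this file shows.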
-class VerifyQuantizedLstmLayer : public LayerVerifierBase
-{
-public:
-    VerifyQuantizedLstmLayer(const std::string& layerName,
-                             const std::vector<armnn::TensorInfo>& inputInfos,
-                             const std::vector<armnn::TensorInfo>& outputInfos,
-                             const armnn::QuantizedLstmInputParams& inputParams)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos), m_InputParams(inputParams) {}
-
-    void VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
-                                 const armnn::QuantizedLstmInputParams& params,
-                                 const char* name)
-    {
-        VerifyNameAndConnections(layer, name);
-        VerifyInputParameters(params);
-    }
-
-protected:
-    void VerifyInputParameters(const armnn::QuantizedLstmInputParams& params)
-    {
-        VerifyConstTensors("m_InputToInputWeights", m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
-        VerifyConstTensors("m_InputToForgetWeights", m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
-        VerifyConstTensors("m_InputToCellWeights", m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
-        VerifyConstTensors("m_InputToOutputWeights", m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
-        VerifyConstTensors("m_RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
-        VerifyConstTensors("m_RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
-        VerifyConstTensors("m_RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
-        VerifyConstTensors("m_RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
-        VerifyConstTensors("m_InputGateBias", m_InputParams.m_InputGateBias, params.m_InputGateBias);
-        VerifyConstTensors("m_ForgetGateBias", m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
-        VerifyConstTensors("m_CellBias", m_InputParams.m_CellBias, params.m_CellBias);
-        VerifyConstTensors("m_OutputGateBias", m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
-    }
-
-private:
-    armnn::QuantizedLstmInputParams m_InputParams;
-};
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
-{
-    const uint32_t batchSize = 1;
-    const uint32_t inputSize = 2;
-    const uint32_t numUnits = 4;
-    const uint32_t outputSize = numUnits;
-
-    // Scale/Offset for input/output, cellState In/Out, weights, bias
-    float inputOutputScale = 0.0078125f;
-    int32_t inputOutputOffset = 128;
-
-    float cellStateScale = 0.00048828125f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale = 0.00408021f;
-    int32_t weightsOffset = 100;
-
-    float biasScale = 3.1876640625e-05f;
-    int32_t biasOffset = 0;
-
-    // The shape of weight data is {outputSize, inputSize} = {4, 2}
-    armnn::TensorShape inputToInputWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
-
-    armnn::TensorShape inputToForgetWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
-
-    armnn::TensorShape inputToCellWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
-
-    armnn::TensorShape inputToOutputWeightsShape = {4, 2};
-    std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
-    armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
-
-    // The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
-    armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
-
-    armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
-
-    armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
-
-    armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
-    std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-    armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, weightsOffset);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
-
-    // The shape of bias data is {outputSize} = {4}
-    armnn::TensorShape inputGateBiasShape = {4};
-    std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape, armnn::DataType::Signed32, biasScale, biasOffset);
-    armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
-
-    armnn::TensorShape forgetGateBiasShape = {4};
-    std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape, armnn::DataType::Signed32, biasScale, biasOffset);
-    armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
-
-    armnn::TensorShape cellBiasShape = {4};
-    std::vector<int32_t> cellBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo cellBiasInfo(cellBiasShape, armnn::DataType::Signed32, biasScale, biasOffset);
-    armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
-
-    armnn::TensorShape outputGateBiasShape = {4};
-    std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4};
-    armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape, armnn::DataType::Signed32, biasScale, biasOffset);
-    armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
-
-    armnn::QuantizedLstmInputParams params;
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-    params.m_InputGateBias = &inputGateBias;
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(2);
-    const std::string layerName("QuantizedLstm");
-    armnn::IConnectableLayer* const quantizedLstmLayer = network->AddQuantizedLstmLayer(params, layerName.c_str());
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(1);
-
-    // Connect up
-    armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::QAsymmU8, inputOutputScale, inputOutputOffset);
-    armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits }, armnn::DataType::QSymmS16, cellStateScale, cellStateOffset);
-    armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::QAsymmU8, inputOutputScale, inputOutputOffset);
-
-    inputLayer->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(1));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(quantizedLstmLayer->GetInputSlot(2));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(outputStateTensorInfo);
-
-    quantizedLstmLayer->GetOutputSlot(0).Connect(cellStateOut->GetInputSlot(0));
-    quantizedLstmLayer->GetOutputSlot(0).SetTensorInfo(cellStateTensorInfo);
-
-    quantizedLstmLayer->GetOutputSlot(1).Connect(outputLayer->GetInputSlot(0));
-    quantizedLstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQuantizedLstmLayer checker(layerName,
-                                     {inputTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
-                                     {cellStateTensorInfo, outputStateTensorInfo},
-                                     params);
-
-    deserializedNetwork->Accept(checker);
-}
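The scale/offset pairs in the test above encode ArmNN's affine quantization scheme: real = scale * (quantized - offset). A small self-contained illustration using the test's weight quantization parameters (the Dequantize helper is ours, not an ArmNN API):

    #include <cstdint>
    #include <cstdio>

    // real = scale * (q - offset); with weightsOffset = 100 the quantized
    // value 100 therefore decodes to exactly 0.0f.
    static float Dequantize(uint8_t q, float scale, int32_t offset)
    {
        return scale * (static_cast<float>(q) - static_cast<float>(offset));
    }

    int main()
    {
        const float   weightsScale  = 0.00408021f; // as in the test above
        const int32_t weightsOffset = 100;
        std::printf("q=1   -> %f\n", Dequantize(1,   weightsScale, weightsOffset)); // ~ -0.404
        std::printf("q=100 -> %f\n", Dequantize(100, weightsScale, weightsOffset)); // 0.0
        return 0;
    }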
-class VerifyQLstmLayer : public LayerVerifierBaseWithDescriptor<armnn::QLstmDescriptor>
-{
-public:
-    VerifyQLstmLayer(const std::string& layerName,
-                     const std::vector<armnn::TensorInfo>& inputInfos,
-                     const std::vector<armnn::TensorInfo>& outputInfos,
-                     const armnn::QLstmDescriptor& descriptor,
-                     const armnn::LstmInputParams& inputParams)
-        : LayerVerifierBaseWithDescriptor<armnn::QLstmDescriptor>(layerName, inputInfos, outputInfos, descriptor)
-        , m_InputParams(inputParams) {}
-
-    void VisitQLstmLayer(const armnn::IConnectableLayer* layer,
-                         const armnn::QLstmDescriptor& descriptor,
-                         const armnn::LstmInputParams& params,
-                         const char* name)
-    {
-        VerifyNameAndConnections(layer, name);
-        VerifyDescriptor(descriptor);
-        VerifyInputParameters(params);
-    }
-
-protected:
-    void VerifyInputParameters(const armnn::LstmInputParams& params)
-    {
-        VerifyConstTensors("m_InputToInputWeights", m_InputParams.m_InputToInputWeights, params.m_InputToInputWeights);
-        VerifyConstTensors("m_InputToForgetWeights", m_InputParams.m_InputToForgetWeights, params.m_InputToForgetWeights);
-        VerifyConstTensors("m_InputToCellWeights", m_InputParams.m_InputToCellWeights, params.m_InputToCellWeights);
-        VerifyConstTensors("m_InputToOutputWeights", m_InputParams.m_InputToOutputWeights, params.m_InputToOutputWeights);
-        VerifyConstTensors("m_RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, params.m_RecurrentToInputWeights);
-        VerifyConstTensors("m_RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, params.m_RecurrentToForgetWeights);
-        VerifyConstTensors("m_RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, params.m_RecurrentToCellWeights);
-        VerifyConstTensors("m_RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, params.m_RecurrentToOutputWeights);
-        VerifyConstTensors("m_CellToInputWeights", m_InputParams.m_CellToInputWeights, params.m_CellToInputWeights);
-        VerifyConstTensors("m_CellToForgetWeights", m_InputParams.m_CellToForgetWeights, params.m_CellToForgetWeights);
-        VerifyConstTensors("m_CellToOutputWeights", m_InputParams.m_CellToOutputWeights, params.m_CellToOutputWeights);
-        VerifyConstTensors("m_InputGateBias", m_InputParams.m_InputGateBias, params.m_InputGateBias);
-        VerifyConstTensors("m_ForgetGateBias", m_InputParams.m_ForgetGateBias, params.m_ForgetGateBias);
-        VerifyConstTensors("m_CellBias", m_InputParams.m_CellBias, params.m_CellBias);
-        VerifyConstTensors("m_OutputGateBias", m_InputParams.m_OutputGateBias, params.m_OutputGateBias);
-        VerifyConstTensors("m_ProjectionWeights", m_InputParams.m_ProjectionWeights, params.m_ProjectionWeights);
-        VerifyConstTensors("m_ProjectionBias", m_InputParams.m_ProjectionBias, params.m_ProjectionBias);
-        VerifyConstTensors("m_InputLayerNormWeights", m_InputParams.m_InputLayerNormWeights, params.m_InputLayerNormWeights);
-        VerifyConstTensors("m_ForgetLayerNormWeights", m_InputParams.m_ForgetLayerNormWeights, params.m_ForgetLayerNormWeights);
-        VerifyConstTensors("m_CellLayerNormWeights", m_InputParams.m_CellLayerNormWeights, params.m_CellLayerNormWeights);
-        VerifyConstTensors("m_OutputLayerNormWeights", m_InputParams.m_OutputLayerNormWeights, params.m_OutputLayerNormWeights);
-    }
-
-private:
-    armnn::LstmInputParams m_InputParams;
-};
-
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmBasic)
-{
-    armnn::QLstmDescriptor descriptor;
-
-    descriptor.m_CifgEnabled = true;
-    descriptor.m_ProjectionEnabled = false;
-    descriptor.m_PeepholeEnabled = false;
-    descriptor.m_LayerNormEnabled = false;
-
-    descriptor.m_CellClip = 0.0f;
-    descriptor.m_ProjectionClip = 0.0f;
-
-    descriptor.m_InputIntermediateScale = 0.00001f;
-    descriptor.m_ForgetIntermediateScale = 0.00001f;
-    descriptor.m_CellIntermediateScale = 0.00001f;
-    descriptor.m_OutputIntermediateScale = 0.00001f;
-
-    descriptor.m_HiddenStateScale = 0.07f;
-    descriptor.m_HiddenStateZeroPoint = 0;
-
-    const unsigned int numBatches = 2;
-    const unsigned int inputSize = 5;
-    const unsigned int outputSize = 4;
-    const unsigned int numUnits = 4;
-
-    // Scale/Offset quantization info
-    float inputScale = 0.0078f;
-    int32_t inputOffset = 0;
-
-    float outputScale = 0.0078f;
-    int32_t outputOffset = 0;
-
-    float cellStateScale = 3.5002e-05f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale = 0.007f;
-    int32_t weightsOffset = 0;
-
-    float biasScale = 3.5002e-05f / 1024;
-    int32_t biasOffset = 0;
-
-    // Weights and bias tensor and quantization info
-    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
-
-    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
-
-    std::vector<int8_t> recurrentToForgetWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToCellWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToOutputWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
-    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
-
-    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
-    std::vector<int32_t> cellBiasData(numUnits, 0);
-    std::vector<int32_t> outputGateBiasData(numUnits, 0);
-
-    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
-    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
-    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
-
-    // Set up params
-    armnn::LstmInputParams params;
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // Create network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    const std::string layerName("qLstm");
-
-    armnn::IConnectableLayer* const input = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(2);
-
-    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
-
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(2);
-
-    // Input/Output tensor info
-    armnn::TensorInfo inputInfo({numBatches, inputSize}, armnn::DataType::QAsymmS8, inputScale, inputOffset);
-    armnn::TensorInfo cellStateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, cellStateScale, cellStateOffset);
-    armnn::TensorInfo outputStateInfo({numBatches, outputSize}, armnn::DataType::QAsymmS8, outputScale, outputOffset);
-
-    // Connect input/output slots
-    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
-    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
-
-    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQLstmLayer checker(layerName,
-                             {inputInfo, cellStateInfo, outputStateInfo},
-                             {outputStateInfo, cellStateInfo, outputStateInfo},
-                             descriptor,
-                             params);
-
-    deserializedNetwork->Accept(checker);
-}
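The "additional params because: ..." comments in these tests reflect a fixed gating scheme: each optional LstmInputParams field is enabled by exactly one QLstmDescriptor flag. A hypothetical checker (illustration only, not an ArmNN API) that spells the rules out:

    #include <armnn/Descriptors.hpp>
    #include <armnn/LstmParams.hpp>

    // Hypothetical validity check: the nine mandatory params are always
    // required; everything else is gated by a descriptor flag.
    bool HasRequiredLstmParams(const armnn::QLstmDescriptor& d, const armnn::LstmInputParams& p)
    {
        bool ok = p.m_InputToForgetWeights && p.m_RecurrentToForgetWeights && p.m_ForgetGateBias &&
                  p.m_InputToCellWeights   && p.m_RecurrentToCellWeights   && p.m_CellBias       &&
                  p.m_InputToOutputWeights && p.m_RecurrentToOutputWeights && p.m_OutputGateBias;

        if (!d.m_CifgEnabled) // input-gate params only exist when CIFG is off
        {
            ok = ok && p.m_InputToInputWeights && p.m_RecurrentToInputWeights && p.m_InputGateBias;
        }
        if (d.m_PeepholeEnabled)
        {
            ok = ok && p.m_CellToForgetWeights && p.m_CellToOutputWeights;
            if (!d.m_CifgEnabled) { ok = ok && p.m_CellToInputWeights; }
        }
        if (d.m_ProjectionEnabled) // the projection bias stays optional
        {
            ok = ok && p.m_ProjectionWeights;
        }
        if (d.m_LayerNormEnabled)
        {
            ok = ok && p.m_ForgetLayerNormWeights && p.m_CellLayerNormWeights && p.m_OutputLayerNormWeights;
            if (!d.m_CifgEnabled) { ok = ok && p.m_InputLayerNormWeights; }
        }
        return ok;
    }

The three QLstm tests below exercise exactly these combinations: CIFG only, CIFG plus layer norm, and the full set with CIFG disabled.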
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmCifgLayerNorm)
-{
-    armnn::QLstmDescriptor descriptor;
-
-    // CIFG (input gate) params are only used when CIFG is disabled; this test enables CIFG, so they are omitted
-    descriptor.m_CifgEnabled = true;
-    descriptor.m_ProjectionEnabled = false;
-    descriptor.m_PeepholeEnabled = false;
-    descriptor.m_LayerNormEnabled = true;
-
-    descriptor.m_CellClip = 0.0f;
-    descriptor.m_ProjectionClip = 0.0f;
-
-    descriptor.m_InputIntermediateScale = 0.00001f;
-    descriptor.m_ForgetIntermediateScale = 0.00001f;
-    descriptor.m_CellIntermediateScale = 0.00001f;
-    descriptor.m_OutputIntermediateScale = 0.00001f;
-
-    descriptor.m_HiddenStateScale = 0.07f;
-    descriptor.m_HiddenStateZeroPoint = 0;
-
-    const unsigned int numBatches = 2;
-    const unsigned int inputSize = 5;
-    const unsigned int outputSize = 4;
-    const unsigned int numUnits = 4;
-
-    // Scale/Offset quantization info
-    float inputScale = 0.0078f;
-    int32_t inputOffset = 0;
-
-    float outputScale = 0.0078f;
-    int32_t outputOffset = 0;
-
-    float cellStateScale = 3.5002e-05f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale = 0.007f;
-    int32_t weightsOffset = 0;
-
-    float layerNormScale = 3.5002e-05f;
-    int32_t layerNormOffset = 0;
-
-    float biasScale = layerNormScale / 1024;
-    int32_t biasOffset = 0;
-
-    // Weights and bias tensor and quantization info
-    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
-    armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
-
-    // Mandatory params
-    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
-
-    std::vector<int8_t> recurrentToForgetWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToCellWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToOutputWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
-    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
-
-    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
-    std::vector<int32_t> cellBiasData(numUnits, 0);
-    std::vector<int32_t> outputGateBiasData(numUnits, 0);
-
-    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
-    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
-    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
-
-    // Layer Norm
-    std::vector<int16_t> forgetLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> outputLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
-    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
-    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
-
-    // Set up params
-    armnn::LstmInputParams params;
-
-    // Mandatory params
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // Layer Norm
-    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
-    params.m_CellLayerNormWeights = &cellLayerNormWeights;
-    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
-
-    // Create network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    const std::string layerName("qLstm");
-
-    armnn::IConnectableLayer* const input = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(2);
-
-    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
-
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(2);
-
-    // Input/Output tensor info
-    armnn::TensorInfo inputInfo({numBatches, inputSize}, armnn::DataType::QAsymmS8, inputScale, inputOffset);
-    armnn::TensorInfo cellStateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, cellStateScale, cellStateOffset);
-    armnn::TensorInfo outputStateInfo({numBatches, outputSize}, armnn::DataType::QAsymmS8, outputScale, outputOffset);
-
-    // Connect input/output slots
-    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
-    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
-
-    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQLstmLayer checker(layerName,
-                             {inputInfo, cellStateInfo, outputStateInfo},
-                             {outputStateInfo, cellStateInfo, outputStateInfo},
-                             descriptor,
-                             params);
-
-    deserializedNetwork->Accept(checker);
-}
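GenerateRandomData<T> is provided by the SerializerTestUtils sources this patch adds; its implementation is not shown in this hunk, but a stand-in consistent with these call sites could look like the following sketch (the name suffix, seed, and value range are ours):

    #include <random>
    #include <type_traits>
    #include <vector>

    // Minimal stand-in for the GenerateRandomData<T> helper used above.
    // Integer element types draw from an int distribution and are narrowed,
    // since std::uniform_int_distribution is not defined for int8_t/int16_t.
    template <typename T>
    std::vector<T> GenerateRandomDataSketch(size_t size)
    {
        using Dist = typename std::conditional<std::is_integral<T>::value,
                                               std::uniform_int_distribution<int>,
                                               std::uniform_real_distribution<float>>::type;
        std::mt19937 generator(997); // fixed seed keeps the test inputs reproducible
        Dist distribution(-64, 64);
        std::vector<T> data(size);
        for (auto& value : data)
        {
            value = static_cast<T>(distribution(generator));
        }
        return data;
    }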
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmAdvanced)
-{
-    armnn::QLstmDescriptor descriptor;
-
-    descriptor.m_CifgEnabled = false;
-    descriptor.m_ProjectionEnabled = true;
-    descriptor.m_PeepholeEnabled = true;
-    descriptor.m_LayerNormEnabled = true;
-
-    descriptor.m_CellClip = 0.1f;
-    descriptor.m_ProjectionClip = 0.1f;
-
-    descriptor.m_InputIntermediateScale = 0.00001f;
-    descriptor.m_ForgetIntermediateScale = 0.00001f;
-    descriptor.m_CellIntermediateScale = 0.00001f;
-    descriptor.m_OutputIntermediateScale = 0.00001f;
-
-    descriptor.m_HiddenStateScale = 0.07f;
-    descriptor.m_HiddenStateZeroPoint = 0;
-
-    const unsigned int numBatches = 2;
-    const unsigned int inputSize = 5;
-    const unsigned int outputSize = 4;
-    const unsigned int numUnits = 4;
-
-    // Scale/Offset quantization info
-    float inputScale = 0.0078f;
-    int32_t inputOffset = 0;
-
-    float outputScale = 0.0078f;
-    int32_t outputOffset = 0;
-
-    float cellStateScale = 3.5002e-05f;
-    int32_t cellStateOffset = 0;
-
-    float weightsScale = 0.007f;
-    int32_t weightsOffset = 0;
-
-    float layerNormScale = 3.5002e-05f;
-    int32_t layerNormOffset = 0;
-
-    float biasScale = layerNormScale / 1024;
-    int32_t biasOffset = 0;
-
-    // Weights and bias tensor and quantization info
-    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
-    armnn::TensorInfo peepholeWeightsInfo({numUnits}, armnn::DataType::QSymmS16, weightsScale, weightsOffset);
-    armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
-    armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits}, armnn::DataType::QSymmS8, weightsScale, weightsOffset);
-
-    // Mandatory params
-    std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> inputToOutputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputToForgetWeights(inputWeightsInfo, inputToForgetWeightsData);
-    armnn::ConstTensor inputToCellWeights(inputWeightsInfo, inputToCellWeightsData);
-    armnn::ConstTensor inputToOutputWeights(inputWeightsInfo, inputToOutputWeightsData);
-
-    std::vector<int8_t> recurrentToForgetWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToCellWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToOutputWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor recurrentToForgetWeights(recurrentWeightsInfo, recurrentToForgetWeightsData);
-    armnn::ConstTensor recurrentToCellWeights(recurrentWeightsInfo, recurrentToCellWeightsData);
-    armnn::ConstTensor recurrentToOutputWeights(recurrentWeightsInfo, recurrentToOutputWeightsData);
-
-    std::vector<int32_t> forgetGateBiasData(numUnits, 1);
-    std::vector<int32_t> cellBiasData(numUnits, 0);
-    std::vector<int32_t> outputGateBiasData(numUnits, 0);
-
-    armnn::ConstTensor forgetGateBias(biasInfo, forgetGateBiasData);
-    armnn::ConstTensor cellBias(biasInfo, cellBiasData);
-    armnn::ConstTensor outputGateBias(biasInfo, outputGateBiasData);
-
-    // CIFG
-    std::vector<int8_t> inputToInputWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
-    std::vector<int8_t> recurrentToInputWeightsData = GenerateRandomData<int8_t>(recurrentWeightsInfo.GetNumElements());
-    std::vector<int32_t> inputGateBiasData(numUnits, 1);
-
-    armnn::ConstTensor inputToInputWeights(inputWeightsInfo, inputToInputWeightsData);
-    armnn::ConstTensor recurrentToInputWeights(recurrentWeightsInfo, recurrentToInputWeightsData);
-    armnn::ConstTensor inputGateBias(biasInfo, inputGateBiasData);
-
-    // Peephole
-    std::vector<int16_t> cellToInputWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellToForgetWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellToOutputWeightsData = GenerateRandomData<int16_t>(peepholeWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor cellToInputWeights(peepholeWeightsInfo, cellToInputWeightsData);
-    armnn::ConstTensor cellToForgetWeights(peepholeWeightsInfo, cellToForgetWeightsData);
-    armnn::ConstTensor cellToOutputWeights(peepholeWeightsInfo, cellToOutputWeightsData);
-
-    // Projection
-    std::vector<int8_t> projectionWeightsData = GenerateRandomData<int8_t>(projectionWeightsInfo.GetNumElements());
-    std::vector<int32_t> projectionBiasData(outputSize, 1);
-
-    armnn::ConstTensor projectionWeights(projectionWeightsInfo, projectionWeightsData);
-    armnn::ConstTensor projectionBias(biasInfo, projectionBiasData);
-
-    // Layer Norm
-    std::vector<int16_t> inputLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> forgetLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> cellLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-    std::vector<int16_t> outputLayerNormWeightsData = GenerateRandomData<int16_t>(layerNormWeightsInfo.GetNumElements());
-
-    armnn::ConstTensor inputLayerNormWeights(layerNormWeightsInfo, inputLayerNormWeightsData);
-    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsData);
-    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsData);
-    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsData);
-
-    // Set up params
-    armnn::LstmInputParams params;
-
-    // Mandatory params
-    params.m_InputToForgetWeights = &inputToForgetWeights;
-    params.m_InputToCellWeights = &inputToCellWeights;
-    params.m_InputToOutputWeights = &inputToOutputWeights;
-
-    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
-    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
-    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
-
-    params.m_ForgetGateBias = &forgetGateBias;
-    params.m_CellBias = &cellBias;
-    params.m_OutputGateBias = &outputGateBias;
-
-    // CIFG
-    params.m_InputToInputWeights = &inputToInputWeights;
-    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
-    params.m_InputGateBias = &inputGateBias;
-
-    // Peephole
-    params.m_CellToInputWeights = &cellToInputWeights;
-    params.m_CellToForgetWeights = &cellToForgetWeights;
-    params.m_CellToOutputWeights = &cellToOutputWeights;
-
-    // Projection
-    params.m_ProjectionWeights = &projectionWeights;
-    params.m_ProjectionBias = &projectionBias;
-
-    // Layer Norm
-    params.m_InputLayerNormWeights = &inputLayerNormWeights;
-    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
-    params.m_CellLayerNormWeights = &cellLayerNormWeights;
-    params.m_OutputLayerNormWeights = &outputLayerNormWeights;
-
-    // Create network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    const std::string layerName("qLstm");
-
-    armnn::IConnectableLayer* const input = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const outputStateIn = network->AddInputLayer(1);
-    armnn::IConnectableLayer* const cellStateIn = network->AddInputLayer(2);
-
-    armnn::IConnectableLayer* const qLstmLayer = network->AddQLstmLayer(descriptor, params, layerName.c_str());
-
-    armnn::IConnectableLayer* const outputStateOut = network->AddOutputLayer(0);
-    armnn::IConnectableLayer* const cellStateOut = network->AddOutputLayer(1);
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(2);
-
-    // Input/Output tensor info
-    armnn::TensorInfo inputInfo({numBatches, inputSize}, armnn::DataType::QAsymmS8, inputScale, inputOffset);
-    armnn::TensorInfo cellStateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, cellStateScale, cellStateOffset);
-    armnn::TensorInfo outputStateInfo({numBatches, outputSize}, armnn::DataType::QAsymmS8, outputScale, outputOffset);
-
-    // Connect input/output slots
-    input->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(0));
-    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-    outputStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(1));
-    outputStateIn->GetOutputSlot(0).SetTensorInfo(cellStateInfo);
-
-    cellStateIn->GetOutputSlot(0).Connect(qLstmLayer->GetInputSlot(2));
-    cellStateIn->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(0).Connect(outputStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(0).SetTensorInfo(outputStateInfo);
-
-    qLstmLayer->GetOutputSlot(1).Connect(cellStateOut->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(1).SetTensorInfo(cellStateInfo);
-
-    qLstmLayer->GetOutputSlot(2).Connect(outputLayer->GetInputSlot(0));
-    qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    VerifyQLstmLayer checker(layerName,
-                             {inputInfo, cellStateInfo, outputStateInfo},
-                             {outputStateInfo, cellStateInfo, outputStateInfo},
-                             descriptor,
-                             params);
-
-    deserializedNetwork->Accept(checker);
+    deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
--
cgit v1.2.1