From fd627ffaec8fd8801d980b4c91ee7c0607ab6aaf Mon Sep 17 00:00:00 2001
From: Jan Eilers
Date: Thu, 25 Feb 2021 17:44:00 +0000
Subject: IVGCVSW-5687 Update Doxygen Docu

* Update Doxygen Documentation for 21.02 release

Signed-off-by: Jan Eilers
Change-Id: I9ed2f9caab038836ea99d7b378d7899fe431a4e5
---
 21.02/_quantizer_test_8cpp.xhtml | 307 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 307 insertions(+)
 create mode 100644 21.02/_quantizer_test_8cpp.xhtml

diff --git a/21.02/_quantizer_test_8cpp.xhtml b/21.02/_quantizer_test_8cpp.xhtml
new file mode 100644
index 0000000000..5aaf550419
--- /dev/null
+++ b/21.02/_quantizer_test_8cpp.xhtml
QuantizerTest.cpp File Reference

#include "../Graph.hpp"
#include "../Network.hpp"
#include "../NetworkQuantizerUtils.hpp"
#include "../OverrideInputRangeVisitor.hpp"
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnQuantizer/INetworkQuantizer.hpp>
#include <QuantizeHelper.hpp>
#include <boost/test/unit_test.hpp>
#include <unordered_map>
Go to the source code of this file.

Namespaces

armnn
    Copyright (c) 2021 ARM Limited and Contributors.

Typedefs

using MinMaxRange = std::pair< float, float >
using MinMaxRanges = std::vector< MinMaxRange >
using MinMaxRangeMap = std::unordered_map< LayerGuid, MinMaxRanges >
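
Taken together, these aliases model the range-tracking bookkeeping the quantizer tests rely on: one observed (min, max) pair per output slot, grouped per layer GUID. A minimal self-contained sketch of how they compose; the LayerGuid stand-in and the GUID value 42 are illustrative, not taken from this file:

// Minimal sketch of the typedefs above. LayerGuid is a stand-in here;
// in ArmNN it is the profiling GUID type.
#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

using LayerGuid      = uint64_t; // illustrative stand-in
using MinMaxRange    = std::pair<float, float>;
using MinMaxRanges   = std::vector<MinMaxRange>;
using MinMaxRangeMap = std::unordered_map<LayerGuid, MinMaxRanges>;

int main()
{
    MinMaxRangeMap ranges;
    // Record one observed (min, max) range for an output slot of layer 42.
    ranges[42u].push_back(MinMaxRange{-1.5f, 1.5f});
    return ranges.at(42u).front().second > 0.0f ? 0 : 1;
}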
 
Functions

void VisitLayersTopologically (const INetwork *inputNetwork, IStrategy &visitor)
TensorInfo GetInputTensorInfo (const INetwork *network)
TensorInfo GetInputTensorInfo (const NetworkImpl *network)
void TestNetwork (INetwork *network, const TensorShape inShape, const TensorShape outShape)
void TestNetwork (INetwork *network, const TensorShape shape)
BOOST_AUTO_TEST_CASE (QuantizeAddition)
INetworkPtr CreateNetworkWithActivationLayer (const ActivationDescriptor &descriptor, const TensorShape &shape)
INetworkPtr CreateNetworkWithArgMinMaxLayer (const ArgMinMaxDescriptor &descriptor, const TensorShape &shape)
INetworkPtr CreateNetworkWithInputOutputLayers ()
BOOST_AUTO_TEST_CASE (InputOutputLayerDynamicQuant)
BOOST_AUTO_TEST_CASE (QuantizeAbsActivation)
BOOST_AUTO_TEST_CASE (QuantizeArgMax)
BOOST_AUTO_TEST_CASE (QuantizeLinearActivation)
BOOST_AUTO_TEST_CASE (QuantizeReLuActivation)
BOOST_AUTO_TEST_CASE (QuantizeSoftReLuActivation)
BOOST_AUTO_TEST_CASE (QuantizeBoundedReluActivation)
BOOST_AUTO_TEST_CASE (QuantizeTanHActivation)
BOOST_AUTO_TEST_CASE (QuantizeLeakyReLuActivation)
BOOST_AUTO_TEST_CASE (QuantizeELuActivation)
BOOST_AUTO_TEST_CASE (QuantizeHardSwishActivation)
BOOST_AUTO_TEST_CASE (QuantizeBatchNorm)
BOOST_AUTO_TEST_CASE (QuantizeDepthToSpace)
BOOST_AUTO_TEST_CASE (OverrideInputRangeEmptyNetwork)
BOOST_AUTO_TEST_CASE (OverrideInputRangeNoInputLayers)
BOOST_AUTO_TEST_CASE (OverrideInputRangeInputLayers)
INetworkPtr CreateNetworkWithFullyConnectedLayer (const bool biasEnabled, const TensorShape &inputShape, const TensorShape &outputShape)
void ValidateFullyConnectedLayer (const bool biasEnabled)
BOOST_AUTO_TEST_CASE (QuantizeFill)
BOOST_AUTO_TEST_CASE (QuantizeFullyConnected)
BOOST_AUTO_TEST_CASE (QuantizeFullyConnectedBiasEnabled)
void TestQuantizeConvolution2d (bool useBiases)
BOOST_AUTO_TEST_CASE (QuantizeConvolution2d)
BOOST_AUTO_TEST_CASE (QuantizeConvolution2dWithBiases)
void TestQuantizeDepthwiseConvolution2d (bool useBiases)
BOOST_AUTO_TEST_CASE (QuantizeDepthwiseConvolution2d)
BOOST_AUTO_TEST_CASE (QuantizeDepthwiseConvolution2dWithBiases)
BOOST_AUTO_TEST_CASE (QuantizeInstanceNormalization)
BOOST_AUTO_TEST_CASE (QuantizeLogSoftmax)
INetworkPtr CreateNetworkWithSoftmaxLayer (const SoftmaxDescriptor &descriptor, const TensorShape &shape)
BOOST_AUTO_TEST_CASE (QuantizeSoftmax)
BOOST_AUTO_TEST_CASE (QuantizeStandIn)
IConnectableLayer * CreateStartOfLeakyReluNetwork (INetwork *network, const TensorInfo &info)
void CompleteLeakyReluNetwork (INetwork *network, IConnectableLayer *activation, IConnectableLayer *layerUnderTest, const TensorInfo &info)
BOOST_AUTO_TEST_CASE (QuantizePermute)
BOOST_AUTO_TEST_CASE (QuantizeSpaceToBatch)
BOOST_AUTO_TEST_CASE (QuantizeSpaceToDepth)
BOOST_AUTO_TEST_CASE (QuantizePooling2d)
BOOST_AUTO_TEST_CASE (QuantizeConstant)
BOOST_AUTO_TEST_CASE (QuantizeArgMinMax)
BOOST_AUTO_TEST_CASE (QuantizeComparison)
BOOST_AUTO_TEST_CASE (QuantizeConcat)
BOOST_AUTO_TEST_CASE (QuantizeReshape)
BOOST_AUTO_TEST_CASE (QuantizeSplitter)
BOOST_AUTO_TEST_CASE (QuantizeResize)
BOOST_AUTO_TEST_CASE (QuantizeStridedSlice)
BOOST_AUTO_TEST_CASE (QuantizeBatchToSpace)
BOOST_AUTO_TEST_CASE (QuantizePrelu)
void TestQuantizeTransposeConvolution2d (bool useBiases)
BOOST_AUTO_TEST_CASE (QuantizeTransposeConvolution2d)
BOOST_AUTO_TEST_CASE (QuantizeTransposeConvolution2dWithBiases)
BOOST_AUTO_TEST_CASE (QuantizeStack)
BOOST_AUTO_TEST_CASE (QuantizeSlice)
std::vector< uint8_t > SetupQuantize (float value)
BOOST_AUTO_TEST_CASE (QuantizeInf)
BOOST_AUTO_TEST_CASE (QuantizeNegativeInf)
void PreserveTypeTestImpl (const DataType &dataType)
BOOST_AUTO_TEST_CASE (PreserveTypeFloat32)
BOOST_AUTO_TEST_CASE (PreserveTypeQAsymmU8)
BOOST_AUTO_TEST_CASE (PreserveTypeQsymm8)
BOOST_AUTO_TEST_CASE (PreserveTypeQsymm16)
BOOST_AUTO_TEST_CASE (TestConnectionPreservationAfterDynamicQuant)
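
Most entries above follow the same Boost.Test pattern: a small helper builds or quantizes something, and a BOOST_AUTO_TEST_CASE asserts on the result. A compilable sketch of that pattern, loosely modelled on SetupQuantize and the QuantizeInf / QuantizeNegativeInf cases; the helper body, its assumed scale and zero point, and the suite name are illustrative, not this file's actual implementation:

// Sketch of the test pattern used throughout this file; link against
// the Boost unit test framework to run it.
#define BOOST_TEST_MODULE QuantizerSketch
#include <boost/test/unit_test.hpp>

#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

// Hypothetical stand-in for SetupQuantize: quantize one float to QAsymmU8
// assuming scale 1.0 and zero point 0, saturating to [0, 255].
std::vector<uint8_t> SetupQuantizeSketch(float value)
{
    const float clamped = std::fmin(std::fmax(value, 0.0f), 255.0f);
    return { static_cast<uint8_t>(std::round(clamped)) };
}

BOOST_AUTO_TEST_SUITE(QuantizerSketchSuite)

BOOST_AUTO_TEST_CASE(QuantizeSaturatesInfinity)
{
    // Out-of-range inputs saturate at the ends of the representable range
    // rather than wrapping around.
    BOOST_TEST(SetupQuantizeSketch(std::numeric_limits<float>::infinity())[0] == 255);
    BOOST_TEST(SetupQuantizeSketch(-std::numeric_limits<float>::infinity())[0] == 0);
}

BOOST_AUTO_TEST_SUITE_END()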
 
Variables

const float g_AsymmU8QuantizationBase = 255.0f
const float g_AsymmS8QuantizationBase = 255.0f
const float g_SymmS8QuantizationBase = 127.0f
const float g_SymmS16QuantizationBase = 32767.0f
const float g_TestTolerance = 0.000001f
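
These constants are the denominators used when the tests turn an observed float range into integer quantization parameters: the asymmetric 8-bit schemes spread the range over 255 steps, the symmetric ones over 127 or 32767 levels, and g_TestTolerance bounds the float comparisons. A sketch of the arithmetic for QAsymmU8; the helper is my own illustration (assuming max > min), not a function defined in this file:

#include <cmath>
#include <cstdint>
#include <utility>

const float g_AsymmU8QuantizationBase = 255.0f;

// Derive (scale, offset) for QAsymmU8 from a float range [min, max].
std::pair<float, int32_t> ComputeQAsymmU8Params(float min, float max)
{
    // Each of the 255 integer steps covers an equal slice of the range.
    const float scale = (max - min) / g_AsymmU8QuantizationBase;
    // The offset shifts the range so that 'min' lands on level 0.
    const int32_t offset = static_cast<int32_t>(std::round(-min / scale));
    return { scale, offset };
}

// Worked example: for [-1.0f, 1.0f] this yields scale ~= 0.00784f and
// offset 128, so -1.0f quantizes to level 0 and 0.0f to level 128.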
 