diff options
author | josh minor <josh.minor@arm.com> | 2020-01-06 16:40:46 -0600 |
---|---|---|
committer | Derek Lamberti <derek.lamberti@arm.com> | 2020-01-23 14:29:14 +0000 |
commit | 4a3c61091037e7e86e8b03bb060d8c1ab82731a9 (patch) | |
tree | 928644023400ad5ac0c26b33dfff2f975567d6e8 /src/armnn/test/QuantizerTest.cpp | |
parent | 190a39a4a9598e42b636ae4ab843761884148160 (diff) | |
download | armnn-4a3c61091037e7e86e8b03bb060d8c1ab82731a9.tar.gz |
IVGCVSW-4259 Add frontend and reference workload for UnaryOperationLayer
* Added new layer named ElementwiseUnary
* Deprecated existing Abs/Rsqrt layer functions
* Updated existing Abs/Rsqrt test infrastructure to use new layer
* Added boilerplate for new Exp,Neg,Sqrt elemwise op layers
* AbsQuantize test removed pending future commit
* Serialization support added
!android-nn-driver:2550
Change-Id: Ic595c645925e17b45db568187fd05646daf2e87f
Signed-off-by: josh minor <josh.minor@arm.com>
Diffstat (limited to 'src/armnn/test/QuantizerTest.cpp')
-rw-r--r-- | src/armnn/test/QuantizerTest.cpp | 55 |
1 file changed, 0 insertions, 55 deletions
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index 52beb630f9..d568b2cbc0 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -1672,61 +1672,6 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant) VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); } -BOOST_AUTO_TEST_CASE(QuantizeAbs) -{ - class TestAbsQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestAbsQuantization(const TensorShape& inputShape, const TensorShape& outputShape) : - TestLeakyReLuActivationQuantization(inputShape, outputShape) - {} - - TestAbsQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) : - TestLeakyReLuActivationQuantization(options, inputShape, outputShape) - {} - - void VisitAbsLayer(const IConnectableLayer *layer, - const char *name = nullptr) override - { - boost::ignore_unused(name); - TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); - - TestQuantizationParams(outputInfo, - { 30.0f / g_Asymm8QuantizationBase, 128 }, - { 15.0f / g_Symm8QuantizationBase, 0}, - { 15.0f / g_Symm16QuantizationBase, 0 }); - } - }; - - INetworkPtr network = INetwork::Create(); - - //Add the layer being tested - IConnectableLayer* absLayer = network->AddAbsLayer(); - - const TensorShape shape{1U}; - TensorInfo info(shape, DataType::Float32); - - IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info); - - CompleteLeakyReluNetwork(network.get(), activation, absLayer, info); - - INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestAbsQuantization validatorQAsymm8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8); - - const QuantizerOptions qSymm8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options)->ExportNetwork(); - TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - - const QuantizerOptions qSymm16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); - TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); -} - BOOST_AUTO_TEST_CASE(QuantizeArgMinMax) { class TestArgMinMaxQuantization : public TestQuantization |