author    | jimfly01 <jim.flynn@arm.com>              | 2018-10-08 14:43:01 +0100
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:58 +0100
commit    | 6b0b53d307f956c3d67429f2a93df91f2eb2f483 (patch)
tree      | fcc03458f6396d9b9bee55674ca5af562ed5a16b /src/armnn/test/RuntimeTests.cpp
parent    | 0d9d4193200e8bfbaf4cb8f32c866d5fe73a3e7e (diff)
download  | armnn-6b0b53d307f956c3d67429f2a93df91f2eb2f483.tar.gz
IVGCVSW-1929: Fix for this defect (QAsymm8 output with no quantization scale set)
* The Optimize function now takes an optional vector of strings into which
error/warning messages are placed (a usage sketch follows the commit message).
* Optimize now checks the outputs of each layer: for every output that is
Quantized 8 bit but has no quantization scale set, an error message is added
(an illustrative sketch of this check follows the diff).
* A unit test modelled on the defect report has been added to exercise the fix.
!android-nn-driver:152483
Change-Id: If9901f5324a516f1ab62858266b38f98dae16201
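As a rough usage sketch of the change described above, a caller can now pass a std::vector<std::string> to Optimize and report whatever messages the optimizer collects. This mirrors the unit test in the diff below and assumes the Optimize overload introduced by this commit; it is not taken verbatim from any existing caller.

```cpp
// Minimal sketch: drive Optimize with the new error-message vector and
// print any messages it produces. Mirrors the unit test added below.
#include <armnn/ArmNN.hpp>

#include <iostream>
#include <string>
#include <vector>

int main()
{
    using namespace armnn;

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);

    // input -> softmax -> output, with the softmax output deliberately
    // left as QuantisedAsymm8 without a quantization scale.
    INetworkPtr net = INetwork::Create();
    IConnectableLayer* input   = net->AddInputLayer(0, "input");
    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
    IConnectableLayer* output  = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(
        TensorInfo(TensorShape({ 1, 5 }), DataType::QuantisedAsymm8, 1.0f / 255, 0));
    softmax->GetOutputSlot(0).SetTensorInfo(
        TensorInfo(TensorShape({ 1, 5 }), DataType::QuantisedAsymm8));

    std::vector<Compute> backends = { Compute::CpuRef };
    std::vector<std::string> errMessages;
    IOptimizedNetworkPtr optNet = Optimize(*net,
                                           backends,
                                           runtime->GetDeviceSpec(),
                                           OptimizerOptions(),
                                           errMessages);

    // With this change the failure is reported here, rather than surfacing
    // later as an unexplained failure when the network is loaded or run.
    for (const std::string& msg : errMessages)
    {
        std::cerr << msg << std::endl;
    }
    return optNet ? 0 : 1;
}
```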
Diffstat (limited to 'src/armnn/test/RuntimeTests.cpp')
-rw-r--r-- | src/armnn/test/RuntimeTests.cpp | 56
1 file changed, 56 insertions, 0 deletions
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 0c896d874a..0237387b93 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -485,4 +485,60 @@ BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
     BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
+BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
+{
+    // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
+    using namespace armnn;
+
+    // Create runtime in which test will run
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    // build up the structure of the network
+    INetworkPtr net(INetwork::Create());
+    armnn::IConnectableLayer* input = net->AddInputLayer(
+            0,
+            "input"
+    );
+    armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(
+            armnn::SoftmaxDescriptor(),
+            "softmax"
+    );
+    armnn::IConnectableLayer* output = net->AddOutputLayer(
+            0,
+            "output"
+    );
+
+    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
+    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
+            armnn::TensorShape({ 1, 5 }),
+            armnn::DataType::QuantisedAsymm8,
+            1.0f/255,
+            0
+    ));
+
+    softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
+            armnn::TensorShape({ 1, 5 }),
+            armnn::DataType::QuantisedAsymm8
+    ));
+
+    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<std::string> errMessages;
+    armnn::IOptimizedNetworkPtr optNet = Optimize(
+            *net,
+            backends,
+            runtime->GetDeviceSpec(),
+            OptimizerOptions(),
+            errMessages
+    );
+
+    BOOST_TEST(errMessages.size() == 1);
+    BOOST_TEST(errMessages[0] ==
+        "ERROR: output 0 of layer Softmax (softmax) is of type "
+        "Quantized 8 bit but its scale parameter has not been set");
+    BOOST_TEST(!optNet);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
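The diff above only adds the regression test; the check itself lives inside armnn::Optimize (in Network.cpp, not shown here). As a rough illustration of the rule the commit message describes, not the actual ArmNN implementation, a standalone helper performing that per-output check could look like the following. CheckScaleSetOnQuantizedType and its parameters are hypothetical names used for illustration only.

```cpp
// Illustrative sketch only: a per-output check along the lines the commit
// message describes. The real logic is inside armnn::Optimize; this helper
// and its signature are hypothetical stand-ins.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <sstream>
#include <string>
#include <vector>

// Returns false and appends an error message when a Quantized 8 bit output
// still has its default (unset) quantization scale of 0.0f.
bool CheckScaleSetOnQuantizedType(const armnn::TensorInfo& outputInfo,
                                  unsigned int outputIndex,
                                  const std::string& layerDescription, // e.g. "Softmax (softmax)"
                                  std::vector<std::string>& errMessages)
{
    if (outputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8 &&
        outputInfo.GetQuantizationScale() == 0.0f)
    {
        std::stringstream ss;
        ss << "ERROR: output " << outputIndex << " of layer " << layerDescription
           << " is of type Quantized 8 bit but its scale parameter has not been set";
        errMessages.push_back(ss.str());
        return false;
    }
    return true;
}
```

Running a check of this kind over every output slot of every layer during optimization is what turns the silent misconfiguration from the defect report into the single explicit message the test asserts on.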