diff options
Diffstat (limited to 'src/armnn/test')
-rw-r--r-- | src/armnn/test/NetworkTests.cpp | 4 |
-rw-r--r-- | src/armnn/test/RuntimeTests.cpp | 56 |
2 files changed, 58 insertions, 2 deletions
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index c342f22ced..f1319464fc 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -981,8 +981,8 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc) armnn::OptimizerOptions optimizerOptions; optimizerOptions.m_ReduceFp32ToFp16 = true; - armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(), - optimizerOptions); + armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize( + net, backends, runtime->GetDeviceSpec(), optimizerOptions); const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph(); diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp index 0c896d874a..0237387b93 100644 --- a/src/armnn/test/RuntimeTests.cpp +++ b/src/armnn/test/RuntimeTests.cpp @@ -485,4 +485,60 @@ BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef) BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success); } +BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue) +{ + // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929 + using namespace armnn; + + // Create runtime in which test will run + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + + // build up the structure of the network + INetworkPtr net(INetwork::Create()); + armnn::IConnectableLayer* input = net->AddInputLayer( + 0, + "input" + ); + armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer( + armnn::SoftmaxDescriptor(), + "softmax" + ); + armnn::IConnectableLayer* output = net->AddOutputLayer( + 0, + "output" + ); + + input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0)); + softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo( + armnn::TensorShape({ 1, 5 }), + armnn::DataType::QuantisedAsymm8, + 1.0f/255, + 0 + )); + + 
softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo( + armnn::TensorShape({ 1, 5 }), + armnn::DataType::QuantisedAsymm8 + )); + + std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef}; + std::vector<std::string> errMessages; + armnn::IOptimizedNetworkPtr optNet = Optimize( + *net, + backends, + runtime->GetDeviceSpec(), + OptimizerOptions(), + errMessages + ); + + BOOST_TEST(errMessages.size() == 1); + BOOST_TEST(errMessages[0] == + "ERROR: output 0 of layer Softmax (softmax) is of type " + "Quantized 8 bit but its scale parameter has not been set"); + BOOST_TEST(!optNet); +} + BOOST_AUTO_TEST_SUITE_END()