author    | jimfly01 <jim.flynn@arm.com>              | 2018-10-08 14:43:01 +0100
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:58 +0100
commit    | 6b0b53d307f956c3d67429f2a93df91f2eb2f483 (patch)
tree      | fcc03458f6396d9b9bee55674ca5af562ed5a16b /src/armnn/Network.cpp
parent    | 0d9d4193200e8bfbaf4cb8f32c866d5fe73a3e7e (diff)
download  | armnn-6b0b53d307f956c3d67429f2a93df91f2eb2f483.tar.gz
IVGCVSW-1929: Fix for this defect (QASYMM8 output with no quantization scale set)
* The Optimize function now takes an optional vector of strings into which
  error/warning messages are placed.
* The Optimize function now checks the outputs of each layer: if an output is
  quantised 8 bit but its scale has not been set, an error message is added
  for that output.
* Added a unit test, modelled on the defect report, to exercise the fix.
!android-nn-driver:152483
Change-Id: If9901f5324a516f1ab62858266b38f98dae16201
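The caller-side effect of the change is sketched below. This is a minimal, illustrative example only: the helper name `OptimizeWithMessages`, the `CpuRef` backend choice, and the way the network and runtime are obtained are assumptions rather than part of this patch; only the extra `errMessages` parameter of `armnn::Optimize` comes from the change itself.

```cpp
#include <armnn/ArmNN.hpp>
#include <iostream>
#include <string>
#include <vector>

// Sketch only: collect optimizer errors/warnings through the new optional
// parameter. 'net' and 'runtime' are assumed to have been created elsewhere
// (e.g. via INetwork::Create() and IRuntime::Create()).
void OptimizeWithMessages(armnn::INetwork& net, armnn::IRuntime& runtime)
{
    std::vector<std::string> messages;

    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(
        net,
        {armnn::Compute::CpuRef},                               // preferred backends (illustrative)
        runtime.GetDeviceSpec(),
        armnn::OptimizerOptions(),
        armnn::Optional<std::vector<std::string>&>(messages));  // new in this change

    // With this fix, a QAsymm8 output with an unset scale no longer slips
    // through silently: Optimize returns nullptr and the reasons are in 'messages'.
    if (!optNet)
    {
        for (const std::string& msg : messages)
        {
            std::cerr << msg << std::endl;
        }
    }
}
```

If the public header gives the new parameter a default value, existing callers should continue to compile unchanged; that header change is not shown in this diff.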
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r-- | src/armnn/Network.cpp | 74
1 file changed, 63 insertions, 11 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 8e6f49005b..51490e33c4 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -69,10 +69,35 @@ Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
     return m_Graph->SerializeToDot(stream);
 }
+bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
+{
+    bool noErrors = true;
+    unsigned int numOutputs = layer->GetNumOutputSlots();
+    for (unsigned int i = 0; i < numOutputs; i++) {
+        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
+        const TensorInfo& info = outputSlot.GetTensorInfo();
+        if (DataType::QuantisedAsymm8 == info.GetDataType()) {
+            if (0.f == info.GetQuantizationScale()) {
+                noErrors = false;
+                std::stringstream ss;
+                ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
+                   << " (" << layer->GetNameStr() << ") is of type"
+                   << " Quantized 8 bit but its scale parameter has not been set";
+                BOOST_LOG_TRIVIAL(warning) << ss.str();
+                if (errMessages) {
+                    errMessages.value().push_back(ss.str());
+                }
+            }
+        }
+    }
+    return noErrors;
+}
+
 IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                               const std::vector<armnn::Compute>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
-                              const OptimizerOptions& options)
+                              const OptimizerOptions& options,
+                              Optional<std::vector<std::string>&> errMessages)
 {
     if (backendPreferences.empty()) {
         throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
     }
@@ -123,24 +148,41 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
         }
     }
     if (availablePreferredBackends.empty()) {
-        BOOST_LOG_TRIVIAL(warning) << "None of the preferred backends " << backendPreferences
-                                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
-        return {nullptr, &IOptimizedNetwork::Destroy};
+        std::stringstream failureMsg;
+        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
+                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
+        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
+        if (errMessages) {
+            errMessages.value().push_back(failureMsg.str());
+        }
+        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
     }
 
     auto ReturnWithError = [&](Layer* layer)
     {
-        BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
-                                   << " is not supported on any preferred backend " << backendPreferences;
+        std::stringstream failureMsg;
+        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
+                   << " is not supported on any preferred backend " << backendPreferences;
+        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
+        if (errMessages) {
+            errMessages.value().push_back(failureMsg.str());
+        }
         return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
     };
 
     // Assign a compute device for all nodes
+    bool bErrorFound = false;
     for (auto&& layer : optNetObjPtr->GetGraph())
     {
         DataType dataType = layer->GetDataType();
         std::string reasonIfUnsupported;
         bool found = false;
+        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
+        {
+            // don't bomb immediately, find all the quantized outputs
+            // which haven't had a scale set and report them all back.
+            bErrorFound = true;
+        }
         for (const armnn::Compute& backend : availablePreferredBackends)
         {
             // need to set the compute device on the layer
@@ -216,11 +258,16 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                         break;
                     }
                 }
-                BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
-                                           << " is not supported on requested backend " << layer->GetComputeDevice()
-                                           << " for data type " << GetDataTypeName(dataType)
-                                           << " (reason: " << reasonIfUnsupported
-                                           << "), falling back to the next backend.";
+                std::stringstream warningMsg;
+                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
+                           << " is not supported on requested backend " << layer->GetComputeDevice()
+                           << " for data type " << GetDataTypeName(dataType)
+                           << " (reason: " << reasonIfUnsupported
+                           << "), falling back to the next backend.";
+                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
+                if (errMessages) {
+                    errMessages.value().push_back(warningMsg.str());
+                }
             }
             else
             {
@@ -248,6 +295,10 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
             }
         }
     }
+    if (bErrorFound)
+    {
+        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
+    }
 
     Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                 OptimizeInverseConversionsFp32()));
@@ -261,6 +312,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
     return optNet;
 }
 
+
 Network::Network()
 : m_Graph(std::make_unique<Graph>())
 {
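For completeness, a hedged sketch of what a network author has to do to satisfy the new check: every `QuantisedAsymm8` output needs a non-zero quantization scale before `Optimize` is called. The helper name and the scale/offset values below are illustrative; the `TensorInfo` and `IOutputSlot` calls are the standard public ArmNN API, not something introduced by this patch.

```cpp
#include <armnn/ArmNN.hpp>

// Illustrative helper: give a layer's first output a quantization scale so
// that CheckScaleSetOnQuantizedType does not flag it during Optimize().
void SetOutputQuantization(armnn::IConnectableLayer& layer)
{
    armnn::TensorInfo info = layer.GetOutputSlot(0).GetTensorInfo();
    // A scale of 0.0f on a QuantisedAsymm8 output now causes Optimize to
    // return nullptr; the scale and offset used here are placeholders.
    info.SetQuantizationScale(1.0f / 255.0f);
    info.SetQuantizationOffset(0);
    layer.GetOutputSlot(0).SetTensorInfo(info);
}
```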