author     Mike Kelly <mike.kelly@arm.com>   2019-12-11 19:27:11 +0000
committer  Mike Kelly <mike.kelly@arm.com>   2019-12-11 19:27:11 +0000
commit     c7d0d44812336f4db8ab9a649fb40b4d4ed27e97 (patch)
tree       6dc06685a5e6f9458b1b063360e854f76b803b10
parent     1df647726b9f5a1bb725e13919f1f7fa506865c9 (diff)
download   android-nn-driver-c7d0d44812336f4db8ab9a649fb40b4d4ed27e97.tar.gz
Catch std::exception instead of armnn::Exception
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>

MLCE-143 Fix for the service stopping when running CTS on the reference implementation.
 * ConvertDepthwise reports that weights are not supported when the first
   dimension of the tensor is not 1.
 * ConvertConcat was missing one case for reporting unsupported inputs.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>

MLCE-144 Cts NNAPI test cases failed
 * Fixed numerous CTS/VTS failures related to Quantization !armnn:2457

Signed-off-by: Mike Kelly <mike.kelly@arm.com>

MLCE-144 Fix 2d pooling convert function

Signed-off-by: FinnWilliamsArm <Finn.Williams@Arm.com>

Change-Id: I054635ebfd52cb5575490c3bfaae0104eb1685cc
-rw-r--r--  1.2/ArmnnDriverImpl.cpp        10
-rw-r--r--  1.2/HalPolicy.cpp               6
-rw-r--r--  ArmnnDevice.cpp                 6
-rw-r--r--  ArmnnDriverImpl.cpp            10
-rw-r--r--  ArmnnPreparedModel.cpp         12
-rw-r--r--  ArmnnPreparedModel_1_2.cpp     20
-rw-r--r--  ConversionUtils.hpp            26
-rw-r--r--  ModelToINetworkConverter.cpp    2
-rw-r--r--  Utils.cpp                       3

9 files changed, 56 insertions, 39 deletions
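
The bulk of the diff below applies one pattern throughout the driver: exception handlers that previously caught only armnn::Exception now catch std::exception. Because armnn::Exception derives from std::exception, the existing error paths still fire, while standard-library exceptions (std::bad_alloc, std::out_of_range, and so on) are now reported as a GENERAL_FAILURE instead of escaping the handler and terminating the service during CTS runs. A minimal, self-contained sketch of the pattern follows; DoPrepare and LogFailure are hypothetical stand-ins, not driver code.

// Sketch of the widened exception handling applied across the driver.
// DoPrepare() stands in for calls such as Optimize()/LoadNetwork();
// LogFailure() stands in for the driver's ALOGE/ALOGW logging.
#include <exception>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

void DoPrepare()
{
    // May throw armnn::Exception (which derives from std::exception)
    // or any standard-library exception.
    throw std::runtime_error("simulated failure");
}

void LogFailure(const std::string& message)
{
    std::cerr << message << std::endl;
}

int main()
{
    try
    {
        DoPrepare();
    }
    catch (std::exception& e)        // previously: catch (armnn::Exception& e)
    {
        std::stringstream message;
        message << "Exception (" << e.what() << ") caught from optimize.";
        LogFailure(message.str());
        return 1;                    // the driver reports GENERAL_FAILURE here
    }
    return 0;
}

In the driver itself the same handler shape wraps the Optimize, LoadNetwork and EnqueueWorkload call sites, reporting the failure through the HAL callback rather than letting the exception propagate.
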
diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index 8a444e5d..0d54c7ff 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -127,10 +127,10 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime
OptOptions,
errMessages);
}
- catch (armnn::Exception &e)
+ catch (std::exception& e)
{
std::stringstream message;
- message << "armnn::Exception (" << e.what() << ") caught from optimize.";
+ message << "Exception (" << e.what() << ") caught from optimize.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -162,10 +162,10 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime
return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
std::stringstream message;
- message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
+ message << "Exception (" << e.what() << ") caught from LoadNetwork.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -193,7 +193,7 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime
{
clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
}
- catch (const armnn::Exception& error)
+ catch (std::exception& error)
{
ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
options.GetClTunedParametersFile().c_str(), error.what());
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index e08ae84f..0a12fd24 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -317,6 +317,12 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
{
return Fail("%s: Operand is invalid", __func__);
}
+ if (weightsOperand->dimensions[0] != 1)
+ {
+ return Fail("%s: Invalid weights; for depthwise convolution, dimension 0 must be 1 but it is %i",
+ __func__, weightsOperand->dimensions[0]);
+ }
+
armnn::DepthwiseConvolution2dDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NHWC;
diff --git a/ArmnnDevice.cpp b/ArmnnDevice.cpp
index a03d9cc9..34019c32 100644
--- a/ArmnnDevice.cpp
+++ b/ArmnnDevice.cpp
@@ -64,7 +64,7 @@ ArmnnDevice::ArmnnDevice(DriverOptions options)
{
m_ClTunedParameters->Load(m_Options.GetClTunedParametersFile().c_str());
}
- catch (const armnn::Exception& error)
+ catch (std::exception& error)
{
// This is only a warning because the file won't exist the first time you are generating it.
ALOGW("ArmnnDevice: Failed to load CL tuned parameters file '%s': %s",
@@ -77,6 +77,10 @@ ArmnnDevice::ArmnnDevice(DriverOptions options)
{
ALOGE("ArmnnDevice: Failed to setup CL runtime: %s. Device will be unavailable.", error.what());
}
+ catch (std::exception& error)
+ {
+ ALOGE("ArmnnDevice: Unknown exception: %s. Device will be unavailable.", error.what());
+ }
#endif
runtimeOptions.m_EnableGpuProfiling = m_Options.IsGpuProfilingEnabled();
m_Runtime = armnn::IRuntime::Create(runtimeOptions);
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 64188bbf..d5fa9784 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -108,10 +108,10 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
OptOptions,
errMessages);
}
- catch (armnn::Exception &e)
+ catch (std::exception& e)
{
stringstream message;
- message << "armnn::Exception (" << e.what() << ") caught from optimize.";
+ message << "Exception (" << e.what() << ") caught from optimize.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -142,10 +142,10 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
stringstream message;
- message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
+ message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -173,7 +173,7 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
{
clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
}
- catch (const armnn::Exception& error)
+ catch (std::exception& error)
{
ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
options.GetClTunedParametersFile().c_str(), error.what());
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 462970aa..2f1abef7 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -209,9 +209,9 @@ Return<ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(const Request& reque
pOutputTensors->emplace_back(i, outputTensor);
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
+ ALOGW("Exception caught while preparing for EnqueueWorkload: %s", e.what());
NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
return ErrorStatus::GENERAL_FAILURE;
}
@@ -253,9 +253,9 @@ void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
return;
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
+ ALOGW("Exception caught from EnqueueWorkload: %s", e.what());
cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
return;
}
@@ -306,9 +306,9 @@ bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
return false;
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("ExecuteWithDummyInputs: armnn::Exception caught from EnqueueWorkload: %s", e.what());
+ ALOGW("ExecuteWithDummyInputs: Exception caught from EnqueueWorkload: %s", e.what());
return false;
}
return true;
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index a7997c72..a1e481df 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -314,9 +314,9 @@ Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const Requ
pOutputTensors->emplace_back(i, outputTensor);
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
+ ALOGW("Exception caught while preparing for EnqueueWorkload: %s", e.what());
cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
return Void();
}
@@ -345,9 +345,9 @@ Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const Requ
return Void();
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
+ ALOGW("Exception caught from EnqueueWorkload: %s", e.what());
cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
return Void();
}
@@ -531,9 +531,9 @@ void ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
return;
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
+ ALOGW("Exception caught from EnqueueWorkload: %s", e.what());
cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
"ArmnnPreparedModel_1_2::ExecuteGraph");
return;
@@ -594,9 +594,9 @@ bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteWithDummyInputs()
return false;
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("ExecuteWithDummyInputs: armnn::Exception caught from EnqueueWorkload: %s", e.what());
+ ALOGW("ExecuteWithDummyInputs: Exception caught from EnqueueWorkload: %s", e.what());
return false;
}
return true;
@@ -707,9 +707,9 @@ Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const Request&
}
}
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
- ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
+ ALOGW("Exception caught while preparing for EnqueueWorkload: %s", e.what());
callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
return ErrorStatus::GENERAL_FAILURE;
}
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index cc06ccee..b631fb5a 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1349,7 +1349,7 @@ bool ConvertPooling2d(const HalOperation& operation,
LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
- return Fail("%s: Could not read input 0", operationName);
+ return Fail("%s: Operation Could not read input 0", operationName);
}
const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
@@ -1410,16 +1410,17 @@ bool ConvertPooling2d(const HalOperation& operation,
return Fail("%s: Operation has invalid inputs", operationName);
}
- const unsigned int inputWidth = inputInfo.GetShape()[2];
- const unsigned int inputHeight = inputInfo.GetShape()[1];
-
- CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
- CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
-
if (Is12Operand(*output))
{
desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
}
+
+ const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
+ const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
+ const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
+
+ CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
+ CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
}
bool isSupported = false;
@@ -1587,10 +1588,13 @@ bool ConvertConcatenation(const Operation& operation, const Model& model, Conver
return Fail("%s: Operation has invalid inputs", __func__);
}
- armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
- LayerInputHandle operandInputHandle =
- ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
+ LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
+ if (!operandInputHandle.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+ armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
if (operandShape.GetNumDimensions() == 0)
{
return Fail("%s: Operands with rank 0 are not supported", __func__);
@@ -1681,7 +1685,7 @@ bool ConvertConcatenation(const Operation& operation, const Model& model, Conver
concatDescriptor =
armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
}
- catch (const armnn::Exception& error)
+ catch (std::exception& error)
{
return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
}
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 4797ccfd..05e60462 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -25,7 +25,7 @@ ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<
{
Convert();
}
- catch (armnn::Exception& e)
+ catch (std::exception& e)
{
m_ConversionResult = ConversionResult::UnsupportedFeature;
ALOGE("%s: Unexpected exception: %s", __func__, e.what());
diff --git a/Utils.cpp b/Utils.cpp
index 43b65ee3..2f9a4a34 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -122,6 +122,9 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
type = armnn::DataType::QuantisedAsymm8;
break;
+ case V1_2::OperandType::TENSOR_QUANT8_SYMM:
+ type = armnn::DataType::QuantisedSymm8;
+ break;
case V1_2::OperandType::TENSOR_QUANT16_SYMM:
type = armnn::DataType::QuantisedSymm16;
break;