about summary refs log tree commit diff
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2019-06-18 17:21:36 +0100
committerSadik Armagan <sadik.armagan@arm.com>2019-06-18 17:21:36 +0100
commit44bcc02a40e2e4fdfab6eb4ff91a2547ed0d98a0 (patch)
tree346d910d6e7ac090143b545979e88b46adbc24d3
parentb23732bcd6cb51bc7a4da369b1654bfa02a70c00 (diff)
downloadandroid-nn-driver-44bcc02a40e2e4fdfab6eb4ff91a2547ed0d98a0.tar.gz
IVGCVSW-3136 Run VTS tests with 1.2 Driver
* Updated android-nn-driver to run VTS tests with 1.2 Driver

Change-Id: I99a73b1e32dcf5bc655d88c2dbe0e610e245ea15
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
-rw-r--r--ConversionUtils.hpp88
-rw-r--r--ModelToINetworkConverter.cpp14
2 files changed, 56 insertions, 46 deletions
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 8b63f780..03f46696 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -900,6 +900,7 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
ConversionData& data)
{
using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
@@ -915,58 +916,67 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
return LayerInputHandle();
}
- armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
-
- switch (operand->lifetime)
+ try
{
- case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
- case HalOperandLifeTime::MODEL_INPUT:
- case HalOperandLifeTime::MODEL_OUTPUT:
- {
- // The tensor is either an operand internal to the model, or a model input.
- // It can be associated with an ArmNN output slot for an existing layer.
+ armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
- // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
- const uint32_t operandIndex = operation.inputs[inputIndex];
- return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
- break;
- }
- case HalOperandLifeTime::CONSTANT_COPY:
- case HalOperandLifeTime::CONSTANT_REFERENCE:
+ switch (operand->lifetime)
{
- // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
- ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
- if (tensorPin.IsValid())
+ case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
+ case HalOperandLifeTime::MODEL_INPUT:
+ case HalOperandLifeTime::MODEL_OUTPUT:
{
- if (!IsLayerSupportedForAnyBackend(__func__,
- armnn::IsConstantSupported,
- data.m_Backends,
- tensorPin.GetConstTensor().GetInfo()))
+ // The tensor is either an operand internal to the model, or a model input.
+ // It can be associated with an ArmNN output slot for an existing layer.
+
+ // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
+ const uint32_t operandIndex = operation.inputs[inputIndex];
+ return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
+ break;
+ }
+ case HalOperandLifeTime::CONSTANT_COPY:
+ case HalOperandLifeTime::CONSTANT_REFERENCE:
+ {
+ // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
+ ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
+ if (tensorPin.IsValid())
+ {
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsConstantSupported,
+ data.m_Backends,
+ tensorPin.GetConstTensor().GetInfo()))
+ {
+ return LayerInputHandle();
+ }
+
+ armnn::IConnectableLayer* constantLayer =
+ data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
+ armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
+
+ return LayerInputHandle(true, &outputSlot, operandTensorInfo);
+ }
+ else
{
+ Fail("%s: invalid operand tensor", __func__);
return LayerInputHandle();
}
-
- armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
- armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
-
- return LayerInputHandle(true, &outputSlot, operandTensorInfo);
+ break;
}
- else
+ default:
{
- Fail("%s: invalid operand tensor", __func__);
+ // Unsupported lifetime for an input tensor
+ Fail("%s: unsupported lifetime for input tensor: %s",
+ __func__, toString(operand->lifetime).c_str());
return LayerInputHandle();
}
- break;
- }
- default:
- {
- // Unsupported lifetime for an input tensor
- Fail("%s: unsupported lifetime for input tensor: %s",
- __func__, toString(operand->lifetime).c_str());
- return LayerInputHandle();
}
}
+ catch (UnsupportedOperand<HalOperandType>& e)
+ {
+ Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
+ return LayerInputHandle();
+ }
}
template<typename HalPolicy,
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 96a65604..4797ccfd 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -36,9 +36,9 @@ ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<
template<typename HalPolicy>
void ModelToINetworkConverter<HalPolicy>::Convert()
{
- using HalModel = typename HalPolicy::Model;
- using Operand = typename HalPolicy::Operand;
- using OperandType = typename HalPolicy::OperationType;
+ using HalModel = typename HalPolicy::Model;
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());
@@ -70,7 +70,7 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
{
// inputs in android nn are represented by operands
uint32_t inputIndex = m_Model.inputIndexes[i];
- const Operand& operand = m_Model.operands[inputIndex];
+ const HalOperand& operand = m_Model.operands[inputIndex];
const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i);
@@ -81,7 +81,7 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot;
}
}
- catch (UnsupportedOperand<OperandType>& e)
+ catch (UnsupportedOperand<HalOperandType>& e)
{
Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
m_ConversionResult = ConversionResult::UnsupportedFeature;
@@ -109,7 +109,7 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
{
ok = HalPolicy::ConvertOperation(operation, m_Model, m_Data);
}
- catch (UnsupportedOperand<OperandType>& e)
+ catch (UnsupportedOperand<HalOperandType>& e)
{
Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
ok = false;
@@ -139,7 +139,7 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
{
// outputs in android nn are represented by operands
uint32_t outputIndex = m_Model.outputIndexes[i];
- const Operand& operand = m_Model.operands[outputIndex];
+ const HalOperand& operand = m_Model.operands[outputIndex];
const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i);