author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-06-14 14:54:52 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>  2019-06-14 14:04:58 +0000
commit    cd700e4f0db201bc3066605058dc1c87d483833f (patch)
tree      9aeb9d488391d1010165478c0cb7d9397c7f2c76
parent    e205318c65191990ad5f3e388a0301bc91d88696 (diff)
download  android-nn-driver-cd700e4f0db201bc3066605058dc1c87d483833f.tar.gz
IVGCVSW-3280 Refactor conversion methods to depend only on HalPolicy
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I162cdca922655d1bd71b18fc5d2937351f8879be
-rw-r--r--  1.0/HalPolicy.cpp    | 287
-rw-r--r--  1.0/HalPolicy.hpp    |   1
-rw-r--r--  1.1/HalPolicy.cpp    | 110
-rw-r--r--  1.1/HalPolicy.hpp    |   1
-rw-r--r--  1.2/HalPolicy.cpp    |   4
-rw-r--r--  1.2/HalPolicy.hpp    |   2
-rw-r--r--  ConversionUtils.hpp  | 493
7 files changed, 508 insertions(+), 390 deletions(-)
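
The commit replaces the old per-type template parameter lists (HalOperand, HalOperandType, HalOperation, HalModel) with a single HalPolicy parameter whose nested aliases supply the HAL types. A minimal, self-contained sketch of the idiom follows; the types are toy stand-ins, not the driver's actual definitions:

    #include <cstdint>
    #include <iostream>

    namespace V1_0
    {
    struct Operand   { uint32_t id; };
    struct Operation { uint32_t inputs[2]; };
    struct Model     { Operand operands[4]; };
    } // namespace V1_0

    namespace hal_1_0
    {
    // The policy bundles every HAL type a conversion method needs, so shared
    // helpers can take one template parameter instead of three or four.
    struct HalPolicy
    {
        using Operand   = V1_0::Operand;
        using Operation = V1_0::Operation;
        using Model     = V1_0::Model;
    };
    } // namespace hal_1_0

    // Before: template<typename HalOperand, typename HalOperation, typename HalModel>
    // After: the HAL types are recovered from the policy through defaulted
    // template parameters, so the function body stays unchanged.
    template<typename HalPolicy,
             typename HalOperand   = typename HalPolicy::Operand,
             typename HalOperation = typename HalPolicy::Operation,
             typename HalModel     = typename HalPolicy::Model>
    const HalOperand* GetInputOperand(const HalOperation& operation,
                                      uint32_t inputIndex,
                                      const HalModel& model)
    {
        return &model.operands[operation.inputs[inputIndex]];
    }

    int main()
    {
        V1_0::Model model{};
        V1_0::Operation operation{{1, 0}};

        // Call sites now name the policy once, as in the hunks below.
        const V1_0::Operand* operand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
        std::cout << std::boolalpha << (operand != nullptr) << std::endl;
        return 0;
    }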
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 8acb0d4b..332295d1 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -25,11 +25,11 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_0::OperationType::CONCATENATION:
return ConvertConcatenation(operation, model, data);
case V1_0::OperationType::CONV_2D:
- return ValidateConv2dParameters(operation)
- && ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
+ return ValidateConv2dParameters(operation) &&
+ ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
case V1_0::OperationType::DEPTHWISE_CONV_2D:
- return ValidateDepthwiseConv2dParameters(operation)
- && ConvertDepthwiseConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
+ return ValidateDepthwiseConv2dParameters(operation) &&
+ ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
case V1_0::OperationType::DEQUANTIZE:
return ConvertDequantize(operation, model, data);
case V1_0::OperationType::FLOOR:
@@ -90,8 +90,8 @@ bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);
+ LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
if (!input0.IsValid() || !input1.IsValid())
{
@@ -101,12 +101,12 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
// The FuseActivation parameter is always the input index 2
// and it should be optional
ActivationFn activationFunction;
- if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
+ if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return false;
@@ -133,7 +133,7 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
if (endLayer != nullptr)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
}
else
{
@@ -143,7 +143,7 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
- return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
+ return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
@@ -158,12 +158,12 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
const std::size_t numInputTensors = operation.inputs.size() - 1;
int32_t concatDim;
- if (!GetInputScalar<Operand, OperandType>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
+ if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has no outputs", __func__);
@@ -199,14 +199,15 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
for (uint32_t i = 0; i < numInputTensors; ++i)
{
- const Operand* const operand = GetInputOperand<Operand>(operation, i, model);
+ const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
if (!operand)
{
return Fail("%s: Operation has invalid inputs", __func__);
}
armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
- LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<Operand>(operation, i, model, data);
+ LayerInputHandle operandInputHandle =
+ ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);
if (operandShape.GetNumDimensions() == 0)
{
@@ -274,7 +275,8 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
- bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
+ bool needPermute =
+ CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
if (needPermute)
{
@@ -366,19 +368,19 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
);
}
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid input", __func__);
}
- const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has invalid outputs", __func__);
@@ -397,18 +399,18 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has invalid outputs", __func__);
@@ -427,18 +429,18 @@ bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, Con
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -448,8 +450,10 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
// ArmNN does not currently support non-fixed weights or bias
- ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 1, model, data); // 2D
- ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 2, model, data); // 1D
+ ConstTensorPin weightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
+ ConstTensorPin biasPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D
if (!weightsPin.IsValid() || !biasPin.IsValid())
{
@@ -471,7 +475,7 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
ActivationFn activationFunction;
- if (!GetInputActivationFunction<Operand, OperandType>(operation, 3, activationFunction, model, data))
+ if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -514,7 +518,7 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
input.Connect(startLayer->GetInputSlot(0));
}
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
}
else
{
@@ -526,13 +530,13 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
const Model& model,
ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -548,10 +552,10 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
if (!input.IsValid() ||
- !GetInputScalar<Operand, OperandType>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
- !GetInputFloat32<Operand, OperandType>(operation, 2, descriptor.m_K, model, data) ||
- !GetInputFloat32<Operand, OperandType>(operation, 3, descriptor.m_Alpha, model, data) ||
- !GetInputFloat32<Operand, OperandType>(operation, 4, descriptor.m_Beta, model, data))
+ !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
+ !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
+ !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
+ !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -575,7 +579,7 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
@@ -583,7 +587,7 @@ bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model,
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::Sigmoid;
- return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
+ return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
@@ -591,19 +595,19 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Inputs:
// 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
// “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0: input", __func__);
}
// 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
- LayerInputHandle outputStateIn = ConvertToLayerInputHandle<Operand>(operation, 18, model, data);
+ LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
if (!outputStateIn.IsValid())
{
return Fail("%s: Could not read input 18: outputStateIn", __func__);
}
// 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
- LayerInputHandle cellStateIn = ConvertToLayerInputHandle<Operand>(operation, 19, model, data);
+ LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
if (!cellStateIn.IsValid())
{
return Fail("%s: Could not read input 19: cellStateIn", __func__);
@@ -612,33 +616,37 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Get the mandatory input tensors:
// 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size].
- const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 2, model,
- data);
- // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
- const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 3, model,
- data);
+ const ConstTensorPin inputToForgetWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ const ConstTensorPin inputToCellWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
// 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size].
- const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 4, model,
- data);
+ const ConstTensorPin inputToOutputWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
// 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- const ConstTensorPin recurrentToForgetWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 6,
- model, data);
+ const ConstTensorPin recurrentToForgetWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
// 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 7, model,
- data);
+ const ConstTensorPin recurrentToCellWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
// 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
const ConstTensorPin recurrentToOutputWeightsPin =
- ConvertOperationInputToConstTensorPin<Operand>(operation, 8, model, data);
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
// 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 13, model, data);
+ const ConstTensorPin forgetGateBiasPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
// 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 14, model, data);
+ const ConstTensorPin cellBiasPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
// 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 15, model, data);
+ const ConstTensorPin outputGateBiasPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
if (!inputToForgetWeightsPin.IsValid() ||
!inputToCellWeightsPin.IsValid() ||
@@ -656,32 +664,87 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Get the optional input tensors:
// 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size], where “num_units” corresponds to the number of cell units.
- const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 1, model,
- data, g_DontPermute, nullptr, true);
+ const ConstTensorPin inputToInputWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 1,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
// “num_units”), or the second dimension of the “projection_weights”, if defined.
- const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 5,
- model, data, g_DontPermute, nullptr, true);
+ const ConstTensorPin recurrentToInputWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 5,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 9, model,
- data, g_DontPermute, nullptr, true);
+ const ConstTensorPin cellToInputWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 9,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 10, model,
- data, g_DontPermute, nullptr, true);
+ const ConstTensorPin cellToForgetWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 10,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 11, model,
- data, g_DontPermute, nullptr, true);
+ const ConstTensorPin cellToOutputWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 11,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 12, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin inputGateBiasPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 12,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [output_size, num_units].
- const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 16, model,
- data, g_DontPermute, nullptr, true);
+ const ConstTensorPin projectionWeightsPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 16,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
+
// 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
- const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 17, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin projectionBiasPin =
+ ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
+ 17,
+ model,
+ data,
+ g_DontPermute,
+ nullptr,
+ true);
if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
(!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
@@ -705,36 +768,36 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
ActivationFn activation;
float cellClip;
float projClip;
- if (!GetInputActivationFunctionFromTensor<Operand, OperandType>(operation, 20, activation, model, data) ||
- !GetInputScalar<Operand, OperandType>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
- !GetInputScalar<Operand, OperandType>(operation, 22, OperandType::FLOAT32, projClip, model, data))
+ if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
+ !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
+ !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
{
return Fail("%s: Operation has invalid scalar inputs", __func__);
}
// Outputs:
- // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
- // CIFG, or [batch_size, num_units * 3] without CIFG.
- const Operand* scratchBuffer = GetOutputOperand<Operand>(operation, 0, model);
+ // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
+ // with CIFG, or [batch_size, num_units * 3] without CIFG.
+ const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!scratchBuffer)
{
return Fail("%s: Could not read output 0: scratchBuffer", __func__);
}
// 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
- const Operand* outputStateOut = GetOutputOperand<Operand>(operation, 1, model);
+ const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
if (!outputStateOut)
{
return Fail("%s: Could not read output 1: outputStateOut", __func__);
}
// 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
- const Operand* cellStateOut = GetOutputOperand<Operand>(operation, 2, model);
+ const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
if (!cellStateOut)
{
return Fail("%s: Could not read output 2: cellStateOut", __func__);
}
// 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
// effectively the same as the current “output state (out)” value.
- const Operand* output = GetOutputOperand<Operand>(operation, 3, model);
+ const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
if (!output)
{
return Fail("%s: Could not read output 3: output", __func__);
@@ -894,21 +957,21 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
outputStateIn.Connect(layer->GetInputSlot(1));
cellStateIn.Connect(layer->GetInputSlot(2));
- return (SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, 0, model, data) &&
- SetupAndTrackLayerOutputSlot<Operand>(operation, 1, *layer, 1, model, data) &&
- SetupAndTrackLayerOutputSlot<Operand>(operation, 2, *layer, 2, model, data) &&
- SetupAndTrackLayerOutputSlot<Operand>(operation, 3, *layer, 3, model, data));
+ return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
+ SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
+ SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
+ SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}
bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -934,23 +997,23 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
- return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
+ return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
- return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
+ return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);
+ LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
if (!input0.IsValid() || !input1.IsValid())
{
@@ -960,12 +1023,12 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve
// The FuseActivation parameter is always the input index 2
// and it should be optional
ActivationFn activationFunction;
- if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
+ if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (outputOperand == nullptr)
{
@@ -993,7 +1056,7 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve
if (endLayer != nullptr)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
}
else
{
@@ -1006,7 +1069,7 @@ bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, Conv
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::ReLu;
- return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
+ return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
@@ -1016,7 +1079,7 @@ bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, Con
desc.m_A = 1.0f;
desc.m_B = -1.0f;
- return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
+ return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
@@ -1025,18 +1088,18 @@ bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, Con
desc.m_Function = armnn::ActivationFunction::BoundedReLu;
desc.m_A = 6.0f;
- return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
+ return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has no outputs", __func__);
@@ -1045,7 +1108,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
armnn::SoftmaxDescriptor desc;
- if (!GetInputFloat32<Operand, OperandType>(operation, 1, desc.m_Beta, model, data))
+ if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1064,7 +1127,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
@@ -1074,14 +1137,14 @@ bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, Conv
desc.m_A = 1.0f; // android nn does not support tanH parameters
desc.m_B = 1.0f; // set to 1.0f for unity scaling
- return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
+ return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
- const Operand* inputOperand = GetInputOperand<Operand>(operation, 0, model);
- const Operand* requestedShapeOperand = GetInputOperand<Operand>(operation, 1, model);
- const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
+ const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
+ const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (inputOperand == nullptr
|| requestedShapeOperand == nullptr
@@ -1098,7 +1161,7 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
}
std::vector<int32_t> targetDimensions;
- if (!GetTensorInt32Values<Operand, OperandType>(*requestedShapeOperand, targetDimensions, model, data))
+ if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
{
return Fail("%s: Could not read values of input 1", __func__);
}
@@ -1119,7 +1182,7 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
}
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0", __func__);
@@ -1142,18 +1205,18 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0", __func__);
}
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -1175,8 +1238,8 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m
}
- if ( !GetInputScalar<Operand, OperandType>(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
- || !GetInputScalar<Operand, OperandType>(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
+ if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data) ||
+ !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1188,7 +1251,7 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index 844b67c7..094c47c5 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -21,6 +21,7 @@ class HalPolicy
public:
using Model = V1_0::Model;
using Operand = V1_0::Operand;
+ using OperandLifeTime = V1_0::OperandLifeTime;
using OperandType = V1_0::OperandType;
using Operation = V1_0::Operation;
using OperationType = V1_0::OperationType;
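
The new OperandLifeTime alias exists so that helpers templated on the policy can reach the HAL's lifetime enum, as GetOperandValueReadOnlyAddress does in ConversionUtils.hpp further down. A sketch with toy types; IsConstant is a hypothetical helper used only for illustration:

    #include <iostream>

    namespace V1_0
    {
    enum class OperandLifeTime { CONSTANT_COPY, CONSTANT_REFERENCE, NO_VALUE };
    struct Operand { OperandLifeTime lifetime; };
    } // namespace V1_0

    namespace hal_1_0
    {
    struct HalPolicy
    {
        using Operand         = V1_0::Operand;
        using OperandLifeTime = V1_0::OperandLifeTime; // the alias added above
    };
    } // namespace hal_1_0

    // IsConstant is hypothetical, shown only to illustrate how a template
    // parameterised on the policy reaches the HAL's lifetime enum.
    template<typename HalPolicy,
             typename HalOperand = typename HalPolicy::Operand>
    bool IsConstant(const HalOperand& operand)
    {
        using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
        return operand.lifetime == HalOperandLifeTime::CONSTANT_COPY ||
               operand.lifetime == HalOperandLifeTime::CONSTANT_REFERENCE;
    }

    int main()
    {
        V1_0::Operand operand{V1_0::OperandLifeTime::CONSTANT_COPY};
        std::cout << IsConstant<hal_1_0::HalPolicy>(operand) << std::endl; // prints 1
        return 0;
    }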
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 2584e327..fc4c7a2c 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -102,8 +102,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);
+ LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
if (!input0.IsValid() || !input1.IsValid())
{
@@ -113,12 +113,12 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
// The FuseActivation parameter is always the input index 2
// and it should be optional
ActivationFn activationFunction;
- if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
+ if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return false;
@@ -145,7 +145,7 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
if (endLayer)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
}
return Fail("%s: ProcessActivation failed", __func__);
@@ -153,8 +153,8 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);
+ LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
if (!input0.IsValid() || !input1.IsValid())
{
@@ -164,12 +164,12 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
// The FuseActivation parameter is always the input index 2
// and it should be optional
ActivationFn activationFunction;
- if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
+ if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return false;
@@ -196,7 +196,7 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
if (endLayer)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
}
return Fail("%s: ProcessActivation failed", __func__);
@@ -204,20 +204,20 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* axisOperand = GetInputOperand<Operand>(operation, 1, model);
+ const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
if (!axisOperand)
{
return Fail("%s: Could not read input 1", __func__);
}
std::vector<int32_t> axis;
- if (!GetTensorInt32Values<Operand, OperandType>(*axisOperand, axis, model, data))
+ if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data))
{
return Fail("%s: Input 1 has invalid values", __func__);
}
@@ -233,7 +233,7 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
// Get the "keep dims" flag.
int32_t keepDims = 0;
- if (!GetInputInt32<Operand, OperandType>(operation, 2, keepDims, model, data))
+ if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 2, keepDims, model, data))
{
return Fail("%s: Could not read input 2", __func__);
}
@@ -242,7 +242,7 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
descriptor.m_KeepDims = keepDims > 0;
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -264,12 +264,12 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
@@ -278,7 +278,7 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- const Operand* paddingsOperand = GetInputOperand<Operand>(operation, 1, model);
+ const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
if (!paddingsOperand)
{
@@ -286,14 +286,14 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
}
unsigned int rank = inputInfo.GetNumDimensions();
- armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand<HalPolicy::Operand>(*paddingsOperand);
+ armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
{
return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
}
std::vector<int32_t> paddings;
- GetTensorInt32Values<Operand, OperandType>(*paddingsOperand, paddings, model, data);
+ GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
// add padding for each dimension of input tensor.
armnn::PadDescriptor descriptor;
@@ -308,7 +308,7 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
}
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -331,12 +331,12 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
input.Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
@@ -352,17 +352,17 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
Fail("%s: Only inputs with rank 4 are supported", __func__);
}
- const Operand* blockShapeOperand = GetInputOperand<Operand>(operation, 1, model);
- const Operand* paddingsOperand = GetInputOperand<Operand>(operation, 2, model);
+ const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
+ const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
- armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand<Operand>(*blockShapeOperand);
+ armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
{
return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
}
std::vector<int32_t> blockShape;
- GetTensorInt32Values<Operand, OperandType>(*blockShapeOperand, blockShape, model, data);
+ GetTensorInt32Values<hal_1_1::HalPolicy>(*blockShapeOperand, blockShape, model, data);
if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
{
return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
@@ -376,7 +376,7 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
std::vector<std::pair<unsigned int, unsigned int>> paddingList;
std::vector<int32_t> paddings;
- GetTensorInt32Values<Operand, OperandType>(*paddingsOperand, paddings, model, data);
+ GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
{
int paddingBeforeInput = paddings[i];
@@ -394,7 +394,7 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -415,12 +415,12 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
@@ -437,7 +437,7 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
// NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
// if the operand index is out of bounds.
- const Operand* axisOperand = GetInputOperand<Operand>(operation, 1, model, false);
+ const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
@@ -449,7 +449,7 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
}
else
{
- GetTensorInt32Values<Operand, OperandType>(*axisOperand, axis, model, data);
+ GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data);
}
@@ -472,7 +472,7 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
armnn::ReshapeDescriptor reshapeDesc;
reshapeDesc.m_TargetShape = outputInfo.GetShape();
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -491,12 +491,12 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
@@ -509,9 +509,9 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
}
- const Operand* beginOperand = GetInputOperand<Operand>(operation, 1, model);
- const Operand* endOperand = GetInputOperand<Operand>(operation, 2, model);
- const Operand* stridesOperand = GetInputOperand<Operand>(operation, 3, model);
+ const Operand* beginOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
+ const Operand* endOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
+ const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
std::vector<int32_t> beginValues;
std::vector<int32_t> endValues;
@@ -520,7 +520,7 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
// The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
{
- if (!GetTensorInt32Values<Operand, OperandType>(operand, operandValues, model, data))
+ if (!GetTensorInt32Values<hal_1_1::HalPolicy>(operand, operandValues, model, data))
{
return false;
}
@@ -553,14 +553,14 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
// Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
- if (!GetInputInt32<Operand, OperandType>(operation, 4, descriptor.m_BeginMask, model, data)
- || !GetInputInt32<Operand, OperandType>(operation, 5, descriptor.m_EndMask, model, data)
- || !GetInputInt32<Operand, OperandType>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
+ if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
+ !GetInputInt32<hal_1_1::HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
+ !GetInputInt32<hal_1_1::HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -581,12 +581,12 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
@@ -603,7 +603,7 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model,
// NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
// if the operand index is out of bounds.
- const Operand* permOperand = GetInputOperand<Operand>(operation, 1, model, false);
+ const Operand* permOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
std::vector<int32_t> perm(rank);
if (!permOperand)
@@ -616,7 +616,7 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model,
}
else
{
- GetTensorInt32Values<Operand, OperandType>(*permOperand, perm, model, data);
+ GetTensorInt32Values<hal_1_1::HalPolicy>(*permOperand, perm, model, data);
}
std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
@@ -632,7 +632,7 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model,
armnn::PermuteDescriptor permuteDesc;
permuteDesc.m_DimMappings = permutationVector;
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -654,18 +654,18 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model,
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* blockOperand = GetInputOperand<Operand>(operation, 1, model);
+ const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
if (!blockOperand)
{
return Fail("%s: Could not read input 1", __func__);
@@ -673,7 +673,7 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
// Convert the block operand to int32
std::vector<int32_t> block;
- if (!GetTensorInt32Values<Operand, OperandType>(*blockOperand, block, model, data))
+ if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*blockOperand, block, model, data))
{
return Fail("%s: Input 1 has invalid values", __func__);
}
@@ -699,7 +699,7 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
// Setting crops to 0,0 0,0 as it is not supported in Android NN API
batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
- const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -721,7 +721,7 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index da0f7e41..dd8558b3 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -19,6 +19,7 @@ class HalPolicy
public:
using Model = V1_1::Model;
using Operand = V1_0::Operand;
+ using OperandLifeTime = V1_0::OperandLifeTime;
using OperandType = V1_0::OperandType;
using Operation = V1_1::Operation;
using OperationType = V1_1::OperationType;
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index abc0cfca..11a1cefa 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -131,9 +131,9 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
switch (operation.type)
{
case V1_2::OperationType::CONV_2D:
- return ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
+ return ConvertConv2d<hal_1_2::HalPolicy>(operation, model, data);
case V1_2::OperationType::DEPTHWISE_CONV_2D:
- return ConvertDepthwiseConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
+ return ConvertDepthwiseConv2d<hal_1_2::HalPolicy>(operation, model, data);
default:
return Fail("%s: Operation type %s not supported in ArmnnDriver",
__func__, toString(operation.type).c_str());
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index d27e4c7a..e6001c48 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -19,6 +19,8 @@ class HalPolicy
public:
using Model = V1_2::Model;
using Operand = V1_2::Operand;
+ using OperandLifeTime = V1_0::OperandLifeTime;
+ using OperandType = V1_2::OperandType;
using Operation = V1_2::Operation;
using OperationType = V1_2::OperationType;
using ExecutionCallback = V1_2::IExecutionCallback;
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 2f26c120..8b63f780 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -492,8 +492,13 @@ namespace armnn_driver
using namespace android::nn;
-template<typename HalOperand, typename HalOperation, typename HalModel>
-const HalOperand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model,
+template<typename HalPolicy,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
+const HalOperand* GetInputOperand(const HalOperation& operation,
+ uint32_t inputIndex,
+ const HalModel& model,
bool failOnIndexOutOfBounds = true)
{
if (inputIndex >= operation.inputs.size())
@@ -509,8 +514,13 @@ const HalOperand* GetInputOperand(const HalOperation& operation, uint32_t inputI
return &model.operands[operation.inputs[inputIndex]];
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
-const HalOperand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
+template<typename HalPolicy,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
+const HalOperand* GetOutputOperand(const HalOperation& operation,
+ uint32_t outputIndex,
+ const HalModel& model)
{
if (outputIndex >= operation.outputs.size())
{
@@ -524,7 +534,56 @@ const HalOperand* GetOutputOperand(const HalOperation& operation, uint32_t outpu
return &model.operands[operation.outputs[outputIndex]];
}
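
Note the defaulted template parameters: a caller writes only GetInputOperand<hal_1_0::HalPolicy>(...) and HalOperand, HalOperation and HalModel all default to the policy's nested aliases, while remaining individually overridable. A self-contained sketch of the mechanics (stand-in types, not the real HAL ones):

#include <cstdint>
#include <vector>

struct Operand {};
struct Model { std::vector<Operand> operands; };
struct Operation { std::vector<uint32_t> inputs; };

struct HalPolicy  // hypothetical
{
    using Operand   = ::Operand;
    using Operation = ::Operation;
    using Model     = ::Model;
};

template<typename Policy,
         typename HalOperand   = typename Policy::Operand,
         typename HalOperation = typename Policy::Operation,
         typename HalModel     = typename Policy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model)
{
    if (inputIndex >= operation.inputs.size())
    {
        return nullptr;  // index out of bounds
    }
    return &model.operands[operation.inputs[inputIndex]];
}

int main()
{
    Model model{{Operand{}}};
    Operation operation{{0u}};
    // Only the policy is spelled out; the other three parameters default.
    return GetInputOperand<HalPolicy>(operation, 0, model) != nullptr ? 0 : 1;
}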
-template<typename HalOperand, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalModel = typename HalPolicy::Model>
+const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
+ const HalModel& model,
+ const ConversionData& data,
+ bool optional = false)
+{
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
+ const void* valueStart = nullptr;
+ switch (operand.lifetime)
+ {
+ case HalOperandLifeTime::CONSTANT_COPY:
+ {
+ // Constant found in model.operandValues
+ valueStart = &model.operandValues[operand.location.offset];
+ break;
+ }
+ case HalOperandLifeTime::CONSTANT_REFERENCE:
+ {
+ // Constant specified via a Memory object
+ valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
+ break;
+ }
+ case HalOperandLifeTime::NO_VALUE:
+ {
+            // An optional input tensor with no values is not an error, so it should not be reported as a failure
+ if (optional)
+ {
+ valueStart = nullptr;
+ break;
+ }
+ [[fallthrough]];
+ }
+ default:
+ {
+ // Unsupported/invalid (e.g. can't get value of an input to the model)
+ Fail("%s: unsupported/invalid operand lifetime: %s",
+ __func__, toString(operand.lifetime).c_str());
+ valueStart = nullptr;
+ }
+ }
+
+ return valueStart;
+}
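
The function body is unchanged apart from the enum now coming from the policy: constants resolve to an address (either inline in model.operandValues or via a memory pool), an absent-but-optional operand resolves to nullptr without failing, and everything else is an error. A standalone mock of that control flow (hypothetical reduced types):

#include <cstdint>
#include <cstdio>
#include <vector>

enum class LifeTime { CONSTANT_COPY, CONSTANT_REFERENCE, NO_VALUE, MODEL_INPUT };

struct Operand { LifeTime lifetime; uint32_t offset; };
struct Model   { std::vector<uint8_t> operandValues; };

const void* ValueAddress(const Operand& operand, const Model& model, bool optional)
{
    switch (operand.lifetime)
    {
        case LifeTime::CONSTANT_COPY:
            // Constant stored inline in the model.
            return &model.operandValues[operand.offset];
        case LifeTime::CONSTANT_REFERENCE:
            // The driver resolves this through a shared-memory pool;
            // elided in this sketch.
            return nullptr;
        case LifeTime::NO_VALUE:
            if (optional)
            {
                return nullptr;  // absent optional input: not an error
            }
            [[fallthrough]];
        default:
            std::fprintf(stderr, "unsupported operand lifetime\n");
            return nullptr;
    }
}

int main()
{
    Model model{{42}};
    Operand constant{LifeTime::CONSTANT_COPY, 0};
    Operand missing {LifeTime::NO_VALUE,      0};
    bool ok = ValueAddress(constant, model, false) != nullptr &&
              ValueAddress(missing,  model, true)  == nullptr;
    return ok ? 0 : 1;
}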
+
+template<typename HalPolicy,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
const HalModel& model,
const ConversionData& data,
@@ -532,6 +591,8 @@ ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
const armnn::TensorShape* overrideTensorShape = nullptr,
bool optional = false)
{
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
if (!IsOperandTypeSupportedForTensors(operand.type))
{
Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
@@ -539,15 +600,15 @@ ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
}
if (!optional &&
- operand.lifetime !=V1_0::OperandLifeTime::CONSTANT_COPY &&
- operand.lifetime !=V1_0::OperandLifeTime::CONSTANT_REFERENCE &&
- operand.lifetime !=V1_0::OperandLifeTime::NO_VALUE)
+ operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
+ operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
+ operand.lifetime != HalOperandLifeTime::NO_VALUE)
{
Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
return ConstTensorPin();
}
- const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
+ const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
if (!valueStart)
{
if (optional)
@@ -568,7 +629,9 @@ ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
uint32_t inputIndex,
const HalModel& model,
@@ -577,65 +640,27 @@ ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operati
const armnn::TensorShape* overrideTensorShape = nullptr,
bool optional = false)
{
- const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
+ using HalOperand = typename HalPolicy::Operand;
+
+ const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
if (!operand)
{
Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
return ConstTensorPin();
}
- return ConvertOperandToConstTensorPin(*operand,
- model,
- data,
- dimensionMappings,
- overrideTensorShape,
- optional);
+ return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
+ model,
+ data,
+ dimensionMappings,
+ overrideTensorShape,
+ optional);
}
-template<typename HalOperand, typename HalModel>
-const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
- const HalModel& model,
- const ConversionData& data,
- bool optional = false)
-{
- const void* valueStart = nullptr;
-
- switch (operand.lifetime)
- {
- case V1_0::OperandLifeTime::CONSTANT_COPY:
- {
- // Constant found in model.operandValues
- valueStart = &model.operandValues[operand.location.offset];
- break;
- }
- case V1_0::OperandLifeTime::CONSTANT_REFERENCE:
- {
- // Constant specified via a Memory object
- valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
- break;
- }
- case V1_0::OperandLifeTime::NO_VALUE:
- {
- // An optional input tensor with no values is not an error so should not register as a fail
- if (optional)
- {
- valueStart = nullptr;
- break;
- }
- [[fallthrough]];
- }
- default:
- {
- // Unsupported/invalid (e.g. can't get value of an input to the model)
- Fail("%s: unsupported/invalid operand lifetime: %s",
- __func__, toString(operand.lifetime).c_str());
- valueStart = nullptr;
- }
- }
-
- return valueStart;
-}
-
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel, typename OutputType>
+template<typename HalPolicy,
+ typename OutputType,
+ typename HalOperandType = typename HalPolicy::OperandType,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
uint32_t inputIndex,
HalOperandType type,
@@ -643,7 +668,9 @@ bool GetInputScalar(const HalOperation& operation,
const HalModel& model,
const ConversionData& data)
{
- const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
+ using HalOperand = typename HalPolicy::Operand;
+
+ const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
if (!operand)
{
return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
@@ -661,7 +688,7 @@ bool GetInputScalar(const HalOperation& operation,
__func__, operand->location.length, sizeof(OutputType));
}
- const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
+ const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
if (!valueAddress)
{
return Fail("%s: failed to get address for operand", __func__);
@@ -671,29 +698,34 @@ bool GetInputScalar(const HalOperation& operation,
return true;
}
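
GetInputScalar keeps OutputType as a real template parameter after the policy: it is deduced from the out-reference, so the same GetInputScalar<HalPolicy>(...) spelling reads both int32_t and float scalars, and the length check against sizeof(OutputType) still applies. A reduced sketch of that deduction (hypothetical types):

#include <cstdint>
#include <cstddef>
#include <cstring>

enum class OperandType { INT32, FLOAT32 };

struct HalPolicy { using OperandType = ::OperandType; };  // hypothetical

// OutputType is deduced from 'out'; only the policy is named explicitly.
template<typename Policy, typename OutputType>
bool GetScalar(typename Policy::OperandType type,
               typename Policy::OperandType expected,
               const void* value,
               std::size_t length,
               OutputType& out)
{
    if (type != expected || length != sizeof(OutputType))
    {
        return false;  // wrong operand type or size mismatch
    }
    std::memcpy(&out, value, sizeof(OutputType));
    return true;
}

int main()
{
    int32_t stride = 0;
    const int32_t raw = 2;
    return GetScalar<HalPolicy>(OperandType::INT32, OperandType::INT32,
                                &raw, sizeof(raw), stride) ? 0 : 1;
}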
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
uint32_t inputIndex,
int32_t& outValue,
const HalModel& model,
const ConversionData& data)
{
- return GetInputScalar<HalOperand, HalOperandType>(operation, inputIndex, HalOperandType::INT32, outValue, model,
- data);
+ return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
uint32_t inputIndex,
float& outValue,
const HalModel& model,
const ConversionData& data)
{
- return GetInputScalar<HalOperand, HalOperandType>(operation, inputIndex, HalOperandType::FLOAT32, outValue, model,
- data);
+ return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalOperandType = typename HalPolicy::OperandType,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
uint32_t inputIndex,
HalOperandType type,
@@ -711,7 +743,7 @@ bool GetInputActivationFunctionImpl(const HalOperation& operation,
}
int32_t activationFunctionAsInt;
- if (!GetInputScalar<HalOperand, HalOperandType>(operation, inputIndex, type, activationFunctionAsInt, model, data))
+ if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
{
return Fail("%s: failed to get activation input value", __func__);
}
@@ -719,22 +751,26 @@ bool GetInputActivationFunctionImpl(const HalOperation& operation,
return true;
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
uint32_t inputIndex,
ActivationFn& outActivationFunction,
const HalModel& model,
const ConversionData& data)
{
- return GetInputActivationFunctionImpl<HalOperand, HalOperandType>(operation,
- inputIndex,
- HalOperandType::INT32,
- outActivationFunction,
- model,
- data);
+ return GetInputActivationFunctionImpl<HalPolicy>(operation,
+ inputIndex,
+ HalPolicy::OperandType::INT32,
+ outActivationFunction,
+ model,
+ data);
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
uint32_t inputIndex,
ActivationFn& outActivationFunction,
@@ -742,16 +778,18 @@ bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
const ConversionData& data)
{
// This only accepts a 1-D tensor of size 1
- return GetInputActivationFunctionImpl<HalOperand, HalOperandType>(operation,
- inputIndex,
- HalOperandType::INT32,
- outActivationFunction,
- model,
- data);
+ return GetInputActivationFunctionImpl<HalPolicy>(operation,
+ inputIndex,
+ HalPolicy::OperandType::INT32,
+ outActivationFunction,
+ model,
+ data);
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
uint32_t inputIndex,
ActivationFn& activationFunction,
@@ -764,8 +802,7 @@ bool GetOptionalInputActivation(const HalOperation& operation,
}
else
{
- if (!GetInputActivationFunction<HalOperand, HalOperandType>(operation, inputIndex, activationFunction, model,
- data))
+ if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -773,11 +810,10 @@ bool GetOptionalInputActivation(const HalOperation& operation,
return true;
}
-template <typename HalOperand,
- typename HalOperandType,
- typename HalOperation,
- typename HalModel,
- typename ConvolutionDescriptor>
+template<typename HalPolicy,
+ typename ConvolutionDescriptor,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
uint32_t dilationXIndex,
ConvolutionDescriptor& descriptor,
@@ -787,35 +823,37 @@ bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
bool success = true;
if (operation.inputs.size() >= dilationXIndex + 2)
{
- success &= GetInputScalar<HalOperand, HalOperandType>(operation,
- dilationXIndex,
- HalOperandType::INT32,
- descriptor.m_DilationX,
- model,
- data);
- success &= GetInputScalar<HalOperand, HalOperandType>(operation,
- dilationXIndex + 1,
- HalOperandType::INT32,
- descriptor.m_DilationY,
- model,
- data);
+ success &= GetInputScalar<HalPolicy>(operation,
+ dilationXIndex,
+ HalPolicy::OperandType::INT32,
+ descriptor.m_DilationX,
+ model,
+ data);
+ success &= GetInputScalar<HalPolicy>(operation,
+ dilationXIndex + 1,
+ HalPolicy::OperandType::INT32,
+ descriptor.m_DilationY,
+ model,
+ data);
}
return success;
}
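
Dilation values are optional trailing inputs: the helper only reads them when operation.inputs.size() shows they are present, and otherwise leaves the descriptor's defaults untouched while still returning success. A toy version of that presence check (the real helper reads each value with GetInputScalar<HalPolicy>; here the inputs vector holds the scalar values directly):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Operation  { std::vector<int32_t> inputs; };
struct Descriptor { int32_t m_DilationX = 1; int32_t m_DilationY = 1; };

bool GetOptionalDilation(const Operation& operation,
                         std::size_t dilationXIndex,
                         Descriptor& descriptor)
{
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        // Both dilation values are present; read them.
        descriptor.m_DilationX = operation.inputs[dilationXIndex];
        descriptor.m_DilationY = operation.inputs[dilationXIndex + 1];
    }
    return true;  // absent trailing inputs are not an error
}

int main()
{
    Descriptor desc;
    Operation withDilation{{/*other inputs*/ 0, 0, 0, /*dilation*/ 2, 2}};
    Operation withoutDilation{{0, 0, 0}};
    GetOptionalDilation(withDilation, 3, desc);     // sets 2, 2
    GetOptionalDilation(withoutDilation, 3, desc);  // leaves the values untouched
    return desc.m_DilationX == 2 ? 0 : 1;
}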
-template<typename HalOperand, typename HalOperandType, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
std::vector<int32_t>& outValues,
const HalModel& model,
const ConversionData& data)
{
- if (operand.type != HalOperandType::TENSOR_INT32)
+ if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
{
return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
}
- const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
+ const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
if (!startAddress)
{
return Fail("%s: failed to get operand address", __func__, operand.type);
@@ -834,7 +872,9 @@ bool GetTensorInt32Values(const HalOperand& operand,
return true;
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
uint32_t inputIndex,
PaddingScheme& outPaddingScheme,
@@ -842,7 +882,7 @@ bool GetInputPaddingScheme(const HalOperation& operation,
const ConversionData& data)
{
int32_t paddingSchemeAsInt;
- if (!GetInputInt32<HalOperand, HalOperandType>(operation, inputIndex, paddingSchemeAsInt, model, data))
+ if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
{
return Fail("%s: failed to get padding scheme input value", __func__);
}
@@ -851,13 +891,18 @@ bool GetInputPaddingScheme(const HalOperation& operation,
return true;
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
uint32_t inputIndex,
const HalModel& model,
ConversionData& data)
{
- const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
+ const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
if (!operand)
{
Fail("%s: failed to get input operand %i", __func__, inputIndex);
@@ -874,9 +919,9 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
switch (operand->lifetime)
{
- case V1_0::OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
- case V1_0::OperandLifeTime::MODEL_INPUT:
- case V1_0::OperandLifeTime::MODEL_OUTPUT:
+ case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
+ case HalOperandLifeTime::MODEL_INPUT:
+ case HalOperandLifeTime::MODEL_OUTPUT:
{
// The tensor is either an operand internal to the model, or a model input.
// It can be associated with an ArmNN output slot for an existing layer.
@@ -886,11 +931,11 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
break;
}
- case V1_0::OperandLifeTime::CONSTANT_COPY:
- case V1_0::OperandLifeTime::CONSTANT_REFERENCE:
+ case HalOperandLifeTime::CONSTANT_COPY:
+ case HalOperandLifeTime::CONSTANT_REFERENCE:
{
// The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
- ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
+ ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
if (tensorPin.IsValid())
{
if (!IsLayerSupportedForAnyBackend(__func__,
@@ -924,7 +969,9 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
}
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
uint32_t operationOutputIndex,
armnn::IConnectableLayer& layer,
@@ -932,7 +979,9 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
const HalModel& model,
ConversionData& data)
{
- const HalOperand* outputOperand = GetOutputOperand<HalOperand>(operation, operationOutputIndex, model);
+ using HalOperand = typename HalPolicy::Operand;
+
+ const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
{
return false;
@@ -948,13 +997,17 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
return true;
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
uint32_t inputIndex,
const HalModel& model,
ConversionData& data)
{
- const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
+ using HalOperand = typename HalPolicy::Operand;
+
+ const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
if (!operand)
{
return armnn::DataLayout::NHWC;
@@ -965,7 +1018,7 @@ armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
return armnn::DataLayout::NHWC;
}
- const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
+ const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
if (!valueAddress)
{
return armnn::DataLayout::NHWC;
@@ -981,30 +1034,36 @@ armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
}
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
uint32_t outputIndex,
armnn::IConnectableLayer& layer,
const HalModel& model,
ConversionData& data)
{
- return SetupAndTrackLayerOutputSlot<HalOperand>(operation, outputIndex, layer, outputIndex, model, data);
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, outputIndex, layer, outputIndex, model, data);
}
-template<typename HalOperand, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
const char* operationName,
const armnn::ActivationDescriptor& activationDesc,
const HalModel& model,
ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
+ using HalOperand = typename HalPolicy::Operand;
+
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Input 0 is invalid", operationName);
}
- const HalOperand* outputOperand = GetOutputOperand<HalOperand>(operation, 0, model);
+ const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
if (!outputOperand)
{
return false;
@@ -1024,23 +1083,28 @@ bool ConvertToActivation(const HalOperation& operation,
BOOST_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
const char* operationName,
armnn::PoolingAlgorithm poolType,
const HalModel& model,
ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0", operationName);
}
- const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -1060,16 +1124,12 @@ bool ConvertPooling2d(const HalOperation& operation,
{
// one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
android::nn::PaddingScheme scheme;
- if (!GetInputPaddingScheme<HalOperand, HalOperandType>(operation, 1, scheme, model, data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight,
- model, data)
- || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 6, activation, model, data))
+ if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
+ !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
{
return Fail("%s: Operation has invalid inputs", operationName);
}
@@ -1083,23 +1143,15 @@ bool ConvertPooling2d(const HalOperation& operation,
else
{
// one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
- if (!GetInputScalar<HalOperand, HalOperandType>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight,
- model, data)
- || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 9, activation, model, data))
+ if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
+ !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
{
return Fail("%s: Operation has invalid inputs", operationName);
}
@@ -1129,19 +1181,24 @@ bool ConvertPooling2d(const HalOperation& operation,
input.Connect(pooling2dLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
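
In the 7-input form the explicit padding fields are derived from the padding scheme via CalcPadding, which delegates to the Android NN padding helper; the usual SAME/VALID rule it applies looks roughly like the following sketch (an illustration of the rule, not the driver's actual code):

#include <algorithm>
#include <cstdint>
#include <cstdio>

void CalcPaddingSketch(uint32_t input, uint32_t kernel, uint32_t stride,
                       bool same, uint32_t& padBefore, uint32_t& padAfter)
{
    if (!same)
    {
        padBefore = padAfter = 0;  // VALID: no padding
        return;
    }
    const uint32_t outSize = (input + stride - 1) / stride;  // ceil(input / stride)
    const int64_t  needed  = std::max<int64_t>(
        0, static_cast<int64_t>(outSize - 1) * stride + kernel - input);
    padBefore = static_cast<uint32_t>(needed / 2);           // SAME splits the padding
    padAfter  = static_cast<uint32_t>(needed) - padBefore;   // evenly, extra at the end
}

int main()
{
    uint32_t left = 0, right = 0;
    CalcPaddingSketch(224, 3, 2, /*same=*/true, left, right);
    std::printf("padBefore=%u padAfter=%u\n", left, right);  // 0 and 1
}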
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -1151,8 +1208,8 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
// ArmNN does not currently support non-fixed weights or bias
- const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 1, model, data);
- const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 2, model, data);
+ const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
+ const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
if (!weightsPin.IsValid() || !biasPin.IsValid())
{
@@ -1169,35 +1226,27 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
if (operation.inputs.size() >= 10)
{
- if (!GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model,
- data)
- || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 9, activation, model, data)
- || !GetOptionalConvolutionDilationParams<HalOperand, HalOperandType>(operation, 11, desc, model, data))
+ if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
+ !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
+ !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 10, model, data);
+ desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
}
else if (operation.inputs.size() >= 7)
{
android::nn::PaddingScheme paddingScheme;
- if (!GetInputPaddingScheme<HalOperand, HalOperandType>(operation, 3, paddingScheme, model, data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_StrideX,
- model, data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model,
- data)
- || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 6, activation, model, data)
- || !GetOptionalConvolutionDilationParams<HalOperand, HalOperandType>(operation, 8, desc, model, data))
+ if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
+ !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
+ !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1210,7 +1259,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
- desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 7, model, data);
+ desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
}
else
{
@@ -1249,20 +1298,25 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
input.Connect(startLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
-template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
if (!output)
{
@@ -1275,7 +1329,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
// ArmNN does not currently support non-fixed weights or bias
// Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
- const HalOperand* weightsOperand = GetInputOperand<HalOperand>(operation, 1, model);
+ const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
if (weightsOperand == nullptr)
{
@@ -1287,11 +1341,11 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
// Look ahead to find the optional DataLayout, if present
if (operation.inputs.size() >= 12)
{
- desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 11, model, data);
+ desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
}
else if (operation.inputs.size() >= 9)
{
- desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 8, model, data);
+ desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
}
armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
@@ -1308,11 +1362,16 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
// Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
- const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 1, model, data,
- HWIMToMIHW, &weightsShape);
+ const ConstTensorPin weightsPin =
+ ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
+ 1,
+ model,
+ data,
+ HWIMToMIHW,
+ &weightsShape);
// Bias is a 1D tensor
- const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 2, model, data);
+ const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
if (!weightsPin.IsValid() || !biasPin.IsValid())
{
@@ -1327,20 +1386,14 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
if (operation.inputs.size() >= 11)
{
- if (!GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model,
- data)
- || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 10, activation, model, data)
- || !GetOptionalConvolutionDilationParams<HalOperand, HalOperandType>(operation, 12, desc, model, data))
+ if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
+ !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
+ !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1348,13 +1401,11 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
else if (operation.inputs.size() >= 8)
{
android::nn::PaddingScheme paddingScheme;
- if (!GetInputPaddingScheme<HalOperand, HalOperandType>(operation, 3, paddingScheme, model, data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model,
- data)
- || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model,
- data)
- || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 7, activation, model, data)
- || !GetOptionalConvolutionDilationParams<HalOperand, HalOperandType>(operation, 9, desc, model, data))
+ if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
+ !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
+ !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1402,7 +1453,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
input.Connect(startLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
} // namespace armnn_driver
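
After this change each HalPolicy type acts as an implicit compile-time "policy concept": the shared templates assume the nested aliases Model, Operand, OperandLifeTime, OperandType, Operation and OperationType all exist. If one were missing, the failure would surface as a template error deep inside ConversionUtils.hpp; a hypothetical detection-idiom check (not part of the driver) could instead make the requirement explicit at the policy definition:

#include <type_traits>

template<typename Policy, typename = void>
struct IsHalPolicy : std::false_type {};

// Specialization selected only when every required alias is present.
template<typename Policy>
struct IsHalPolicy<Policy,
    std::void_t<typename Policy::Model,
                typename Policy::Operand,
                typename Policy::OperandLifeTime,
                typename Policy::OperandType,
                typename Policy::Operation,
                typename Policy::OperationType>> : std::true_type {};

struct ExamplePolicy  // stand-in aliases; real policies use the HAL types
{
    using Model           = int;
    using Operand         = int;
    using OperandLifeTime = int;
    using OperandType     = int;
    using Operation       = int;
    using OperationType   = int;
};

static_assert(IsHalPolicy<ExamplePolicy>::value,
              "policy must expose every alias used by ConversionUtils.hpp");

int main() {}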