author     Mike Kelly <mike.kelly@arm.com>  2019-06-11 16:35:25 +0100
committer  Mike Kelly <mike.kelly@arm.com>  2019-06-11 16:35:25 +0100
commit     b5fdf38f0c6596958fab2b84882f2792a31e585a (patch)
tree       d6b578b51c1923c759653d8a04efa90923ad4dd8 /1.0
parent     b92f8901fc34749337ea7a9ad7a2717fc9490de5 (diff)
download   android-nn-driver-b5fdf38f0c6596958fab2b84882f2792a31e585a.tar.gz
IVGCVSW-3181 Add HAL 1.2 support to android-nn-driver
* Updated Android.mk to build HAL 1.2 driver
* Added 1.2 HalPolicy and ArmnnDriver
* Added 1.2 ArmnnPreparedModel
* Updated converters and utilities to accept new HAL 1.2 operands and operand types.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I62856deab24e106f72cccce09468db4971756fa6
Diffstat (limited to '1.0')
-rw-r--r--  1.0/HalPolicy.cpp  455
-rw-r--r--  1.0/HalPolicy.hpp    8
2 files changed, 128 insertions, 335 deletions
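
The core of this change is that the 1.0 policy now calls conversion helpers that are templated on the HAL operand types instead of 1.0-only overloads, so the same bodies can also serve HAL 1.2. The sketch below is a minimal illustration of that pattern, assuming a shared conversion header (such as ConversionUtils.hpp) and placeholder template parameter names; it is abridged and not the driver's exact implementation.

```cpp
// Minimal sketch of the templated conversion pattern this commit introduces.
// Illustrative only: assumes a shared conversion header and placeholder
// template parameter names; not the driver's exact implementation.
template<typename HalOperand,
         typename HalOperandType,
         typename HalOperation,
         typename HalModel>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    // Every operand access goes through a helper parameterised on the HAL
    // operand types, so the same body can serve both V1_0 and V1_2 operations.
    LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    // ... weights/bias extraction, descriptor setup and layer creation mirror
    //     the 1.0-only body removed from HalPolicy.cpp in the diff below ...

    return true; // the real helper would end with
                 // SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data)
}
```

Each HAL policy instantiates these helpers with its own type aliases, which is why the calls in the diff below take the form `ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data)`.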
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 5f421076..8acb0d4b 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -25,9 +25,11 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_0::OperationType::CONCATENATION:
return ConvertConcatenation(operation, model, data);
case V1_0::OperationType::CONV_2D:
- return ConvertConv2d(operation, model, data);
+ return ValidateConv2dParameters(operation)
+ && ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
case V1_0::OperationType::DEPTHWISE_CONV_2D:
- return ConvertDepthwiseConv2d(operation, model, data);
+ return ValidateDepthwiseConv2dParameters(operation)
+ && ConvertDepthwiseConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
case V1_0::OperationType::DEQUANTIZE:
return ConvertDequantize(operation, model, data);
case V1_0::OperationType::FLOOR:
@@ -68,10 +70,28 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
}
}
+bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
+{
+ if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
+ {
+ return Fail("%s: Unsupported number of operation inputs", __func__);
+ }
+ return true;
+}
+
+bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
+{
+ if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
+ {
+ return Fail("%s: Unsupported number of operation inputs", __func__);
+ }
+ return true;
+}
+
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+ LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);
if (!input0.IsValid() || !input1.IsValid())
{
@@ -81,12 +101,12 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
// The FuseActivation parameter is always the input index 2
// and it should be optional
ActivationFn activationFunction;
- if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+ if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (!outputOperand)
{
return false;
@@ -113,7 +133,7 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
if (endLayer != nullptr)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
}
else
{
@@ -123,7 +143,7 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
- return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
+ return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
@@ -138,12 +158,12 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
const std::size_t numInputTensors = operation.inputs.size() - 1;
int32_t concatDim;
- if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
+ if (!GetInputScalar<Operand, OperandType>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has no outputs", __func__);
@@ -179,14 +199,14 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
for (uint32_t i = 0; i < numInputTensors; ++i)
{
- const Operand* const operand = GetInputOperand(operation, i, model);
+ const Operand* const operand = GetInputOperand<Operand>(operation, i, model);
if (!operand)
{
return Fail("%s: Operation has invalid inputs", __func__);
}
armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
- LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
+ LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<Operand>(operation, i, model, data);
if (operandShape.GetNumDimensions() == 0)
{
@@ -346,250 +366,19 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
);
}
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
-}
-
-bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
-{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
- if (!input.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const Operand* output = GetOutputOperand(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
- // ArmNN does not currently support non-fixed weights or bias
- const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
- const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
-
- if (!weightsPin.IsValid() || !biasPin.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- armnn::ConstTensor weights = weightsPin.GetConstTensor();
- armnn::ConstTensor bias = biasPin.GetConstTensor();
- SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
-
- armnn::Convolution2dDescriptor desc;
- desc.m_DataLayout = armnn::DataLayout::NHWC;
- ActivationFn activation;
-
- if (operation.inputs.size() == 10)
- {
- if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
- !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
- !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
- !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
- !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
- !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
- !GetInputActivationFunction(operation, 9, activation, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
- }
- else if (operation.inputs.size() == 7)
- {
- android::nn::PaddingScheme paddingScheme;
- if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
- !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
- !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
- !GetInputActivationFunction(operation, 6, activation, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const uint32_t kernelX = weights.GetShape()[2];
- const uint32_t kernelY = weights.GetShape()[1];
- const uint32_t inputX = inputInfo.GetShape()[2];
- const uint32_t inputY = inputInfo.GetShape()[1];
-
- CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
- CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
- }
- else
- {
- return Fail("%s: Unsupported number of operation inputs", __func__);
- }
-
- desc.m_BiasEnabled = true;
- armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
-
- if (!IsLayerSupportedForAnyBackend(__func__,
- armnn::IsConvolution2dSupported,
- data.m_Backends,
- inputInfo,
- outputInfo,
- desc,
- weights.GetInfo(),
- biases))
- {
- return false;
- }
-
- armnn::IConnectableLayer* startLayer =
- data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
-
- if (!startLayer)
- {
- return Fail("%s: AddConvolution2dLayer failed", __func__);
- }
-
- armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
-
- if (!endLayer)
- {
- return Fail("%s: ProcessActivation failed", __func__);
- }
-
- input.Connect(startLayer->GetInputSlot(0));
-
- return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
-}
-
-bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
-{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
- if (!input.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const Operand* output = GetOutputOperand(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
- // ArmNN does not currently support non-fixed weights or bias
-
- // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
- const Operand* weightsOperand = GetInputOperand(operation, 1, model);
-
- if (weightsOperand == nullptr)
- {
- return Fail("%s: Operand is invalid", __func__);
- }
-
- // Reinterpret weight data as [ H, W, I, M ]
- armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
- weightsOperand->dimensions[2],
- inputInfo.GetShape()[3],
- weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
-
- // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
- const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
-
- const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
- HWIMToMIHW, &weightsShape);
-
- // Bias is a 1D tensor
- const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
-
- if (!weightsPin.IsValid() || !biasPin.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- armnn::ConstTensor weights = weightsPin.GetConstTensor();
- armnn::ConstTensor bias = biasPin.GetConstTensor();
- SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
-
- armnn::DepthwiseConvolution2dDescriptor desc;
- desc.m_DataLayout = armnn::DataLayout::NHWC;
- ActivationFn activation;
-
- if (operation.inputs.size() == 11)
- {
- if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
- !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
- !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
- !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
- !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
- !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
- !GetInputActivationFunction(operation, 10, activation, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
- }
- else if (operation.inputs.size() == 8)
- {
- android::nn::PaddingScheme paddingScheme;
- if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
- !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
- !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
- !GetInputActivationFunction(operation, 7, activation, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const uint32_t kernelX = weights.GetShape()[3];
- const uint32_t kernelY = weights.GetShape()[2];
- const uint32_t inputX = inputInfo.GetShape()[2];
- const uint32_t inputY = inputInfo.GetShape()[1];
-
- CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
- CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
- }
- else
- {
- return Fail("%s: Unsupported number of operation inputs", __func__);
- }
-
- desc.m_BiasEnabled = true;
- armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
-
- if (!IsLayerSupportedForAnyBackend(__func__,
- armnn::IsDepthwiseConvolutionSupported,
- data.m_Backends,
- inputInfo,
- outputInfo,
- desc,
- weights.GetInfo(),
- biases))
- {
- return false;
- }
-
- armnn::IConnectableLayer* startLayer =
- data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
- if (!startLayer)
- {
- return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
- }
-
- armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
- if (!endLayer)
- {
- return Fail("%s: ProcessActivation failed", __func__);
- }
-
- input.Connect(startLayer->GetInputSlot(0));
-
- return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid input", __func__);
}
- const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has invalid outputs", __func__);
@@ -608,18 +397,18 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has invalid outputs", __func__);
@@ -638,18 +427,18 @@ bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, Con
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand(operation, 0, model);
+ const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -659,8 +448,8 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
// ArmNN does not currently support non-fixed weights or bias
- ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
- ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D
+ ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 1, model, data); // 2D
+ ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 2, model, data); // 1D
if (!weightsPin.IsValid() || !biasPin.IsValid())
{
@@ -682,7 +471,7 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
ActivationFn activationFunction;
- if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
+ if (!GetInputActivationFunction<Operand, OperandType>(operation, 3, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -725,7 +514,7 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
input.Connect(startLayer->GetInputSlot(0));
}
- return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
}
else
{
@@ -737,13 +526,13 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
const Model& model,
ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand(operation, 0, model);
+ const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -759,10 +548,10 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
if (!input.IsValid() ||
- !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
- !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
- !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
- !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
+ !GetInputScalar<Operand, OperandType>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
+ !GetInputFloat32<Operand, OperandType>(operation, 2, descriptor.m_K, model, data) ||
+ !GetInputFloat32<Operand, OperandType>(operation, 3, descriptor.m_Alpha, model, data) ||
+ !GetInputFloat32<Operand, OperandType>(operation, 4, descriptor.m_Beta, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -786,7 +575,7 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
@@ -794,7 +583,7 @@ bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model,
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::Sigmoid;
- return ConvertToActivation(operation, __func__, desc, model, data);
+ return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
@@ -802,19 +591,19 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Inputs:
// 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
// “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0: input", __func__);
}
// 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
- LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
+ LayerInputHandle outputStateIn = ConvertToLayerInputHandle<Operand>(operation, 18, model, data);
if (!outputStateIn.IsValid())
{
return Fail("%s: Could not read input 18: outputStateIn", __func__);
}
// 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
- LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
+ LayerInputHandle cellStateIn = ConvertToLayerInputHandle<Operand>(operation, 19, model, data);
if (!cellStateIn.IsValid())
{
return Fail("%s: Could not read input 19: cellStateIn", __func__);
@@ -823,29 +612,33 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Get the mandatory input tensors:
// 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size].
- const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+ const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 2, model,
+ data);
// 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
- const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
+ const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 3, model,
+ data);
// 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size].
- const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
+ const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 4, model,
+ data);
// 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- const ConstTensorPin recurrentToForgetWeightsPin =
- ConvertOperationInputToConstTensorPin(operation, 6, model, data);
+ const ConstTensorPin recurrentToForgetWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 6,
+ model, data);
// 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
+ const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 7, model,
+ data);
// 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
const ConstTensorPin recurrentToOutputWeightsPin =
- ConvertOperationInputToConstTensorPin(operation, 8, model, data);
+ ConvertOperationInputToConstTensorPin<Operand>(operation, 8, model, data);
// 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
+ const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 13, model, data);
// 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
+ const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 14, model, data);
// 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);
+ const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 15, model, data);
if (!inputToForgetWeightsPin.IsValid() ||
!inputToCellWeightsPin.IsValid() ||
@@ -863,31 +656,31 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Get the optional input tensors:
// 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size], where “num_units” corresponds to the number of cell units.
- const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 1, model,
+ data, g_DontPermute, nullptr, true);
// 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
// “num_units”), or the second dimension of the “projection_weights”, if defined.
- const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 5,
+ model, data, g_DontPermute, nullptr, true);
// 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 9, model,
+ data, g_DontPermute, nullptr, true);
// 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 10, model,
+ data, g_DontPermute, nullptr, true);
// 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 11, model,
+ data, g_DontPermute, nullptr, true);
// 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data,
+ const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 12, model, data,
g_DontPermute, nullptr, true);
// 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [output_size, num_units].
- const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data,
- g_DontPermute, nullptr, true);
+ const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 16, model,
+ data, g_DontPermute, nullptr, true);
// 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
- const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data,
+ const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 17, model, data,
g_DontPermute, nullptr, true);
if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
@@ -912,9 +705,9 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
ActivationFn activation;
float cellClip;
float projClip;
- if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
- !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
- !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
+ if (!GetInputActivationFunctionFromTensor<Operand, OperandType>(operation, 20, activation, model, data) ||
+ !GetInputScalar<Operand, OperandType>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
+ !GetInputScalar<Operand, OperandType>(operation, 22, OperandType::FLOAT32, projClip, model, data))
{
return Fail("%s: Operation has invalid scalar inputs", __func__);
}
@@ -922,26 +715,26 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
// Outputs:
// 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
// CIFG, or [batch_size, num_units * 3] without CIFG.
- const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
+ const Operand* scratchBuffer = GetOutputOperand<Operand>(operation, 0, model);
if (!scratchBuffer)
{
return Fail("%s: Could not read output 0: scratchBuffer", __func__);
}
// 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
- const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
+ const Operand* outputStateOut = GetOutputOperand<Operand>(operation, 1, model);
if (!outputStateOut)
{
return Fail("%s: Could not read output 1: outputStateOut", __func__);
}
// 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
- const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
+ const Operand* cellStateOut = GetOutputOperand<Operand>(operation, 2, model);
if (!cellStateOut)
{
return Fail("%s: Could not read output 2: cellStateOut", __func__);
}
// 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
// effectively the same as the current “output state (out)” value.
- const Operand* output = GetOutputOperand(operation, 3, model);
+ const Operand* output = GetOutputOperand<Operand>(operation, 3, model);
if (!output)
{
return Fail("%s: Could not read output 3: output", __func__);
@@ -1101,21 +894,21 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
outputStateIn.Connect(layer->GetInputSlot(1));
cellStateIn.Connect(layer->GetInputSlot(2));
- return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
- SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
- SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
- SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
+ return (SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, 0, model, data) &&
+ SetupAndTrackLayerOutputSlot<Operand>(operation, 1, *layer, 1, model, data) &&
+ SetupAndTrackLayerOutputSlot<Operand>(operation, 2, *layer, 2, model, data) &&
+ SetupAndTrackLayerOutputSlot<Operand>(operation, 3, *layer, 3, model, data));
}
bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand(operation, 0, model);
+ const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -1141,23 +934,23 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
- return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
+ return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
- return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
+ return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+ LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);
if (!input0.IsValid() || !input1.IsValid())
{
@@ -1167,12 +960,12 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve
// The FuseActivation parameter is always the input index 2
// and it should be optional
ActivationFn activationFunction;
- if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+ if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (outputOperand == nullptr)
{
@@ -1200,7 +993,7 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve
if (endLayer != nullptr)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
}
else
{
@@ -1213,7 +1006,7 @@ bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, Conv
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::ReLu;
- return ConvertToActivation(operation, __func__, desc, model, data);
+ return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
@@ -1223,7 +1016,7 @@ bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, Con
desc.m_A = 1.0f;
desc.m_B = -1.0f;
- return ConvertToActivation(operation, __func__, desc, model, data);
+ return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
@@ -1232,18 +1025,18 @@ bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, Con
desc.m_Function = armnn::ActivationFunction::BoundedReLu;
desc.m_A = 6.0f;
- return ConvertToActivation(operation, __func__, desc, model, data);
+ return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (!outputOperand)
{
return Fail("%s: Operation has no outputs", __func__);
@@ -1252,7 +1045,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
armnn::SoftmaxDescriptor desc;
- if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
+ if (!GetInputFloat32<Operand, OperandType>(operation, 1, desc.m_Beta, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1271,7 +1064,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
@@ -1281,14 +1074,14 @@ bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, Conv
desc.m_A = 1.0f; // android nn does not support tanH parameters
desc.m_B = 1.0f; // set to 1.0f for unity scaling
- return ConvertToActivation(operation, __func__, desc, model, data);
+ return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
- const Operand* inputOperand = GetInputOperand(operation, 0, model);
- const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
- const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+ const Operand* inputOperand = GetInputOperand<Operand>(operation, 0, model);
+ const Operand* requestedShapeOperand = GetInputOperand<Operand>(operation, 1, model);
+ const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
if (inputOperand == nullptr
|| requestedShapeOperand == nullptr
@@ -1305,7 +1098,7 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
}
std::vector<int32_t> targetDimensions;
- if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
+ if (!GetTensorInt32Values<Operand, OperandType>(*requestedShapeOperand, targetDimensions, model, data))
{
return Fail("%s: Could not read values of input 1", __func__);
}
@@ -1326,7 +1119,7 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
}
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0", __func__);
@@ -1349,18 +1142,18 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
- LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+ LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
if (!input.IsValid())
{
return Fail("%s: Could not read input 0", __func__);
}
- const Operand* output = GetOutputOperand(operation, 0, model);
+ const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
if (!output)
{
return Fail("%s: Could not read output 0", __func__);
@@ -1382,8 +1175,8 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m
}
- if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
- || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
+ if ( !GetInputScalar<Operand, OperandType>(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
+ || !GetInputScalar<Operand, OperandType>(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
{
return Fail("%s: Operation has invalid inputs", __func__);
}
@@ -1395,7 +1188,7 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index c09dc8a8..844b67c7 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -35,10 +35,6 @@ private:
static bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
-
- static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
@@ -74,6 +70,10 @@ private:
static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ValidateConv2dParameters(const Operation& operation);
+
+ static bool ValidateDepthwiseConv2dParameters(const Operation& operation);
};
} // namespace hal_1_0
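
For context, the 1.2 HalPolicy added by this commit (outside this path-limited '1.0' diff) would be expected to dispatch to the same shared templated converters using its own V1_2 aliases. The fragment below is a hypothetical illustration of that dispatch, not the actual 1.2 source.

```cpp
// Hypothetical illustration only; the real 1.2 sources are not shown in this
// path-limited diff, and HAL 1.2 operations may carry additional optional inputs.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
        // ... remaining HAL 1.2 operations ...
        default:
            return Fail("%s: Operation type not supported", __func__);
    }
}
```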