From d261ffe7211dca5727e3472f7e29a3d16630c1ec Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Fri, 4 Aug 2023 13:17:23 +0100
Subject: IVGCVSW-6294 Added Split support to Android-NN-Driver

* Added Split support to Android-NN-Driver

Signed-off-by: Mike Kelly
Change-Id: I4b0bc146597dae89b68402aa78d1f1b1c894a7fb
---
 1.2/HalPolicy.cpp       |   8 +++
 1.2/HalPolicy.hpp       |   2 +
 1.3/HalPolicy.cpp       |   8 +++
 1.3/HalPolicy.hpp       |   2 +
 ConversionUtils_1_2.hpp | 154 ++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 174 insertions(+)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 9a6266e5..9c44003f 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -173,6 +173,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertSpaceToBatchNd(operation, model, data);
         case V1_2::OperationType::SPACE_TO_DEPTH:
             return ConvertSpaceToDepth(operation, model, data);
+        case V1_2::OperationType::SPLIT:
+            return ConvertSplit(operation, model, data);
         case V1_2::OperationType::SQRT:
             return ConvertSqrt(operation, model, data);
         case V1_2::OperationType::SQUEEZE:
@@ -469,6 +471,12 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
     return ::ConvertLstm<hal_1_2::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertSplit(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertSplit()");
+    return ::ConvertSplit<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertSqrt()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 796f4de8..4d77dfe5 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -141,6 +141,8 @@ private:
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSplit(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 2c8ef065..e5f295fd 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -159,6 +159,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertSpaceToBatchNd(operation, model, data);
         case V1_3::OperationType::SPACE_TO_DEPTH:
             return ConvertSpaceToDepth(operation, model, data);
+        case V1_3::OperationType::SPLIT:
+            return ConvertSplit(operation, model, data);
         case V1_3::OperationType::SQRT:
             return ConvertSqrt(operation, model, data);
         case V1_3::OperationType::SQUEEZE:
@@ -503,6 +505,12 @@ bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model&
     return ::ConvertTransposeConv2d<hal_1_3::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertSplit(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_3::HalPolicy::ConvertSplit()");
+    return ::ConvertSplit<hal_1_3::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertTile(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertTile()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index b0bc09b4..c876da19 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -153,6 +153,8 @@ private:
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSplit(const Operation& operation, const Model& model, ConversionData& data);
+
    static bool ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index e5840468..00c891ae 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -2474,6 +2474,160 @@ bool ConvertSoftmax(const HalOperation& operation, const HalModel& model, Conver
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertSplit(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    using HalOperand     = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    if (operation.inputs.size() != 3)
+    {
+        return Fail("%s: Optional inputs are not supported. Expected 3 but got %i", __func__, operation.inputs.size());
+    }
+
+    // 0: An n-D tensor to split.
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    // 1: An ANEURALNETWORKS_INT32 scalar specifying the axis along which to split.
+    int32_t axis = 0;
+    // 2: An ANEURALNETWORKS_INT32 scalar indicating the number of splits along the given axis.
+    //    Must evenly divide the size of that axis.
+    int32_t numSplits = 0;
+
+    if (!input.IsValid() ||
+        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data) ||
+        !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, numSplits, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // If the number of splits is <= zero, return false.
+    if (numSplits <= 0)
+    {
+        return Fail("%s: Number of splits must be greater than zero", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int inputDimSize = inputInfo.GetNumDimensions();
+    int32_t inputDimensions   = static_cast<int32_t>(inputDimSize);
+
+    if (axis < -inputDimensions || axis >= inputDimensions)
+    {
+        // The axis for a tensor with n dimensions must be between -n and n-1.
+        // E.g. a rank 4 tensor can have an axis in the range [-4, 3]:
+        //      -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        return Fail("%s: Operation has invalid axis %i. Axis must be in range [-n, n-1]", __func__, axis);
+    }
+
+    auto splitDim = armnnUtils::GetUnsignedAxis(inputDimSize, axis);
+
+    if (inputDimSize > MaxNumOfTensorDimensions)
+    {
+        return Fail("%s: The number of dimensions %i for split operation cannot be greater than %i",
+                    __func__, inputInfo.GetNumDimensions(), MaxNumOfTensorDimensions);
+    }
+
+    std::vector<unsigned int> splitterDimSizes(inputDimSize);
+
+    // Add the current input shape to splitterDimSizes.
+    for (uint32_t i = 0; i < inputDimSize; ++i)
+    {
+        splitterDimSizes[i] = inputInfo.GetShape()[i];
+    }
+
+    if (splitterDimSizes[splitDim] % numSplits != 0)
+    {
+        return Fail("%s: The number of splits %i must evenly divide the dimension %i",
+                    __func__, numSplits, splitterDimSizes[splitDim]);
+    }
+    splitterDimSizes[splitDim] /= numSplits;
+
+    ViewsDescriptor descriptor(numSplits, inputDimSize);
+
+    for (int32_t i = 0; i < numSplits; ++i)
+    {
+        // Set the size of the views.
+        for (uint32_t dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+        {
+            descriptor.SetViewSize(i, dimIdx, splitterDimSizes[dimIdx]);
+        }
+        descriptor.SetViewOriginCoord(i, splitDim, splitterDimSizes[splitDim] * i);
+    }
+
+    std::vector<armnn::TensorInfo> outputInfos;
+    for (int32_t i = 0; i < numSplits; ++i)
+    {
+        const HalOperand* output = GetOutputOperand<HalPolicy>(operation, i, model);
+        if (!output)
+        {
+            return Fail("%s: Could not read output %i", __func__, i);
+        }
+
+        const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+        outputInfos.emplace_back(outputInfo);
+    }
+    std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputInfos(outputInfos.begin(),
+                                                                               outputInfos.end());
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSplitterSupported,
+                               data.m_Backends,
+                               isSupported,
+                               setBackend,
+                               inputInfo,
+                               splitterOutputInfos,
+                               descriptor);
+    if (!isSupported)
+    {
+        return Fail("%s: Layer is not supported", __func__);
+    }
+
+    for (int32_t i = 0; i < numSplits; ++i)
+    {
+        const HalOperand* output = GetOutputOperand<HalPolicy>(operation, i, model);
+        if (!output)
+        {
+            return Fail("%s: Could not read output %i", __func__, i);
+        }
+
+        const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+        if (IsDynamicTensor(outputInfo))
+        {
+            return Fail("%s: Dynamic output tensors are not supported", __func__);
+        }
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddSplitterLayer(descriptor, "Split");
+    if (!layer)
+    {
+        return Fail("%s: Could not add the Splitter layer", __func__);
+    }
+    input.Connect(layer->GetInputSlot(0));
+
+    auto validateFunc = [&](const armnn::TensorInfo&, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsSplitterSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputInfo,
+                                   splitterOutputInfos,
+                                   descriptor);
+    };
+
+    for (int32_t i = 0; i < numSplits; ++i)
+    {
+        bool ok = SetupAndTrackLayerOutputSlot<HalPolicy>(operation, i, *layer, model, data, nullptr, validateFunc);
+
+        if (!ok)
+        {
+            return false;
+        }
+    }
+    return true;
+}
+
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
-- 
cgit v1.2.1
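
Note for readers of this patch: the snippet below is a minimal standalone sketch of the ViewsDescriptor setup that ConvertSplit performs above, not code from the driver. It assumes only armnn::ViewsDescriptor and armnnUtils::GetUnsignedAxis from the Arm NN public headers; the helper name MakeSplitViews and the shapes in the comments are illustrative.

#include <armnn/Descriptors.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <cstdint>
#include <vector>

// Illustrative helper: builds the ViewsDescriptor that describes an even
// split of inputShape into numSplits views along the given (possibly
// negative) axis, mirroring the loop in ConvertSplit.
armnn::ViewsDescriptor MakeSplitViews(const std::vector<unsigned int>& inputShape,
                                      int32_t axis,
                                      uint32_t numSplits)
{
    // Negative axes wrap around, e.g. axis -1 on a rank-4 tensor becomes 3.
    const unsigned int rank     = static_cast<unsigned int>(inputShape.size());
    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(rank, axis);

    // Each view keeps the input shape except along splitDim, which shrinks
    // by a factor of numSplits (the caller must ensure an even division).
    std::vector<unsigned int> viewShape(inputShape);
    viewShape[splitDim] /= numSplits;

    armnn::ViewsDescriptor descriptor(numSplits, rank);
    for (uint32_t view = 0; view < numSplits; ++view)
    {
        for (uint32_t dim = 0; dim < rank; ++dim)
        {
            descriptor.SetViewSize(view, dim, viewShape[dim]);
        }
        // Views are laid out back to back along the split axis.
        descriptor.SetViewOriginCoord(view, splitDim, viewShape[splitDim] * view);
    }
    return descriptor;
}

For example, splitting a [2, 6, 4] tensor into 3 views along axis 1 yields three [2, 2, 4] views with origins 0, 2 and 4 on that axis, which is the descriptor the converter passes to IsSplitterSupported and AddSplitterLayer.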