about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2023-08-04 13:17:23 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2023-09-26 13:21:50 +0000
commitd261ffe7211dca5727e3472f7e29a3d16630c1ec (patch)
treefa2fe09f70368f54e8d140cb4e182e6f08277525
parentf762b8391cdb6064fcca4c269fb582e5534bbc7f (diff)
downloadandroid-nn-driver-d261ffe7211dca5727e3472f7e29a3d16630c1ec.tar.gz
IVGCVSW-6294 Added Split support to Android-NN-Driver (tag: v23.11, branch: branches/android-nn-driver_23_11)
* Added Split support to Android-NN-Driver

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4b0bc146597dae89b68402aa78d1f1b1c894a7fb
-rw-r--r--  1.2/HalPolicy.cpp        8
-rw-r--r--  1.2/HalPolicy.hpp        2
-rw-r--r--  1.3/HalPolicy.cpp        8
-rw-r--r--  1.3/HalPolicy.hpp        2
-rw-r--r--  ConversionUtils_1_2.hpp  154
5 files changed, 174 insertions, 0 deletions
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 9a6266e5..9c44003f 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -173,6 +173,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertSpaceToBatchNd(operation, model, data);
case V1_2::OperationType::SPACE_TO_DEPTH:
return ConvertSpaceToDepth(operation, model, data);
+ case V1_2::OperationType::SPLIT:
+ return ConvertSplit(operation, model, data);
case V1_2::OperationType::SQRT:
return ConvertSqrt(operation, model, data);
case V1_2::OperationType::SQUEEZE:
@@ -469,6 +471,12 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
return ::ConvertLstm<hal_1_2::HalPolicy>(operation, model, data);
}
+bool HalPolicy::ConvertSplit(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertSplit()");
+ return ::ConvertSplit<hal_1_2::HalPolicy>(operation, model, data);
+}
+
bool HalPolicy::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertSqrt()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 796f4de8..4d77dfe5 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -141,6 +141,8 @@ private:
static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertSplit(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 2c8ef065..e5f295fd 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -159,6 +159,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertSpaceToBatchNd(operation, model, data);
case V1_3::OperationType::SPACE_TO_DEPTH:
return ConvertSpaceToDepth(operation, model, data);
+ case V1_3::OperationType::SPLIT:
+ return ConvertSplit(operation, model, data);
case V1_3::OperationType::SQRT:
return ConvertSqrt(operation, model, data);
case V1_3::OperationType::SQUEEZE:
@@ -503,6 +505,12 @@ bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model&
return ::ConvertTransposeConv2d<hal_1_3::HalPolicy>(operation, model, data);
}
+bool HalPolicy::ConvertSplit(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSplit()");
+ return ::ConvertSplit<hal_1_3::HalPolicy>(operation, model, data);
+}
+
bool HalPolicy::ConvertTile(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_3::HalPolicy::ConvertTile()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index b0bc09b4..c876da19 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -153,6 +153,8 @@ private:
static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertSplit(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index e5840468..00c891ae 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -2475,6 +2475,160 @@ bool ConvertSoftmax(const HalOperation& operation, const HalModel& model, Conver
}
template<typename HalPolicy,
+ typename Operation = typename HalPolicy::Operation,
+ typename Model = typename HalPolicy::Model>
+bool ConvertSplit(const Operation& operation, const Model& model, ConversionData& data)
+{
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+
+ if (operation.inputs.size() != 3)
+ {
+ return Fail("%s: Optional inputs are not supported expected 3 was %i", __func__, operation.inputs.size());
+ }
+
+ // 0: An n-D tensor to split.
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+ // 1: An ANEURALNETWORKS_INT32 scalar specifying the axis along which to split.
+ int32_t axis = 0;
+ // 2: An ANEURALNETWORKS_INT32 scalar indicating the number of splits along given axis.
+ // Must evenly divide axis size.
+ int32_t numSplits = 0;
+
+ if (!input.IsValid() ||
+ !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data) ||
+ !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, numSplits, model, data))
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ // If number of splits is <= zero, return false.
+ if (numSplits <= 0)
+ {
+ return Fail("%s: Number of splits must be greater than zero", __func__);
+ }
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ unsigned int inputDimSize = inputInfo.GetNumDimensions();
+ int32_t inputDimensions = static_cast<int32_t>(inputDimSize);
+
+ if (axis < -inputDimensions || axis >= inputDimensions)
+ {
+ // The axis for a tensor with n dimensions must be between -n and n-1
+ // E.g. Rank 4 tensor can have axis in range [-4, 3)
+ // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+ return Fail("%s: Operation has invalid axis %i. Axis must be in range [-n, n-1]", __func__, axis);
+ }
+ auto splitDim = armnnUtils::GetUnsignedAxis(inputDimSize, axis);
+
+ if (inputDimSize > MaxNumOfTensorDimensions)
+ {
+ return Fail("%s: The number of dimensions %i for split operation cannot be greater than %i",
+ __func__, inputInfo.GetNumDimensions(), MaxNumOfTensorDimensions);
+ }
+ std::vector<uint32_t> splitterDimSizes(inputDimSize);
+
+ // Add current input shape to splitterDimSizes
+ for (uint32_t i = 0; i < inputDimSize; ++i)
+ {
+ splitterDimSizes[i] = inputInfo.GetShape()[i];
+ }
+
+ if (splitterDimSizes[splitDim] % numSplits != 0)
+ {
+ return Fail("%s: The number of splits %i must evenly divide the dimension %i",
+ __func__, numSplits, splitterDimSizes[splitDim]);
+ }
+ splitterDimSizes[splitDim] /= numSplits;
+
+ ViewsDescriptor descriptor(numSplits, inputDimSize);
+
+ for (int32_t i = 0; i < numSplits; ++i)
+ {
+ // Set the size of the views.
+ for (uint32_t dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+ {
+ descriptor.SetViewSize(i, dimIdx, splitterDimSizes[dimIdx]);
+ }
+ descriptor.SetViewOriginCoord(i, splitDim, splitterDimSizes[splitDim] * i);
+ }
+
+ std::vector<TensorInfo> outputInfos;
+ for (int32_t i = 0; i < numSplits; ++i)
+ {
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, i, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output %i", __func__, i);
+ }
+
+ const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ outputInfos.template emplace_back(outputInfo);
+ }
+ std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(outputInfos.begin(), outputInfos.end());
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsSplitterSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ inputInfo,
+ splitterOutputInfos,
+ descriptor);
+
+ if (!isSupported)
+ {
+ return Fail("%s: Layer is not supported", __func__);
+ }
+
+ for (int32_t i = 0; i < numSplits; ++i)
+ {
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, i, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output %i", __func__, i);
+ }
+
+ const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+ }
+
+ IConnectableLayer* layer = data.m_Network->AddSplitterLayer(descriptor, "Split");
+ if (!layer)
+ {
+ return Fail("%s: could not add the Layer", __func__);
+ }
+ input.Connect(layer->GetInputSlot(0));
+
+ auto validateFunc = [&](const armnn::TensorInfo&, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsSplitterSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ inputInfo,
+ splitterOutputInfos,
+ descriptor);
+ };
+
+ for (int32_t i = 0; i < numSplits; ++i)
+ {
+ bool ok = SetupAndTrackLayerOutputSlot<HalPolicy>(operation, i, *layer, model, data, nullptr, validateFunc);
+
+ if (!ok)
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
bool ConvertLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)