author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2019-05-20 15:31:05 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>          2019-05-23 13:37:29 +0000
commit     15eb5832f45d35c5041ba35a43787e8003e22edb (patch)
tree       09fed880bfb9f384d3170aad5c76e4d565267e20
parent     495852f2adef1d11fbf13ce6347cf61973ce1a65 (diff)
download   armnn-15eb5832f45d35c5041ba35a43787e8003e22edb.tar.gz
IVGCVSW-2771 Fix SubTensor error in vgg16 ExecuteNetwork NEON
* Add check: if sub-tensors cannot be used, call the ACL function instead
* Add computation of SplitAxis from SplitterDescriptor
* Add NeonSplitterWorkload functions
* Modify IsSplitterSupported to call the ACL validate function if sub-tensors cannot be used
* Also check that quantization parameters match when using sub-tensors
* Add more unit tests for Splitter in TfParser and TfLiteParser

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I31e4c7d055117c83c65b598c4125442173242226
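Taken together, the bullet points above amount to one decision: the NEON splitter keeps using sub-tensors where it can, and falls back to the ACL split function (arm_compute::NESplit) where it cannot. A minimal standalone sketch of that decision, with illustrative names only and not the actual ArmNN code:

    #include <set>

    // Returns true when the splitter outputs can be realised as sub-tensors of the
    // input; otherwise the new NeonSplitterWorkload has to run arm_compute::NESplit.
    bool CanUseSubTensors(unsigned int numDimensions,
                          const std::set<unsigned int>& splitAxis, // dims where a view size != input size
                          bool quantizationParametersMatch)        // parent vs. every view
    {
        // A split along the innermost (last) dimension of a tensor with more than
        // two dimensions cannot be expressed as sub-tensors, because the views
        // would not share the parent's row layout.
        const bool lastAxisOnly = splitAxis.size() == 1 &&
                                  *splitAxis.begin() == numDimensions - 1;
        if (numDimensions > 2 && lastAxisOnly)
        {
            return false; // fall back to the ACL function
        }

        // Sub-tensors also require the parent and the views to share the same
        // data type and quantization parameters.
        return quantizationParametersMatch;
    }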
-rw-r--r--   include/armnn/ILayerSupport.hpp                        |   7
-rw-r--r--   include/armnn/LayerSupport.hpp                         |   8
-rw-r--r--   src/armnn/LayerSupport.cpp                             |  13
-rw-r--r--   src/armnn/layers/SplitterLayer.cpp                     |  47
-rw-r--r--   src/armnnTfLiteParser/test/Unpack.cpp                  |  21
-rw-r--r--   src/armnnTfParser/test/Split.cpp                       | 218
-rw-r--r--   src/backends/aclCommon/ArmComputeUtils.hpp             |  21
-rw-r--r--   src/backends/backendsCommon/LayerSupportBase.cpp       |   8
-rw-r--r--   src/backends/backendsCommon/LayerSupportBase.hpp       |   6
-rw-r--r--   src/backends/backendsCommon/WorkloadFactory.cpp        |  13
-rw-r--r--   src/backends/cl/ClLayerSupport.cpp                     |  13
-rw-r--r--   src/backends/cl/ClLayerSupport.hpp                     |   6
-rw-r--r--   src/backends/neon/NeonLayerSupport.cpp                 |  34
-rw-r--r--   src/backends/neon/NeonLayerSupport.hpp                 |   6
-rw-r--r--   src/backends/neon/backend.mk                           |   1
-rw-r--r--   src/backends/neon/workloads/CMakeLists.txt             |   1
-rw-r--r--   src/backends/neon/workloads/NeonSplitterWorkload.cpp   | 112
-rw-r--r--   src/backends/neon/workloads/NeonSplitterWorkload.hpp   |  18
-rw-r--r--   src/backends/reference/RefLayerSupport.cpp             |  13
-rw-r--r--   src/backends/reference/RefLayerSupport.hpp             |   6
20 files changed, 562 insertions, 10 deletions
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index c3fb7b016e..f41495ce1a 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -9,6 +9,7 @@
#include <armnn/Optional.hpp>
#include <cctype>
+#include <functional>
#include <memory>
#include <vector>
@@ -259,10 +260,16 @@ public:
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
virtual bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index e105b67740..0ae8705a5e 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -326,9 +326,17 @@ bool IsSpaceToBatchNdSupported(const BackendId& backend,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
+ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
+bool IsSplitterSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 47a0d3ec6b..5867fab039 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -538,13 +538,26 @@ bool IsSpaceToBatchNdSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
}
+ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
const ViewsDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
+ ARMNN_NO_DEPRECATE_WARN_END
+}
+
+bool IsSplitterSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
}
bool IsStridedSliceSupported(const BackendId& backend,
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index b3a1094118..4a6b2220a7 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -36,20 +36,57 @@ void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fa
{
//If sub tensors are supported than all the "splitter" need to do is to
//set the outputs to be appropriate sub tensors of the input.
- if (factory.SupportsSubTensors())
+ bool useSubTensors = factory.SupportsSubTensors();
+
+ if (useSubTensors)
{
const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
+ const TensorInfo& parentInfo = outputHandler.GetTensorInfo();
+
ITensorHandle* inputData = outputHandler.GetData();
+
+ std::vector<std::unique_ptr<ITensorHandle>> subTensors;
+
//Creates the outputs as subtensors of the input.
for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
{
- m_OutputHandlers[i].SetData(factory.CreateSubTensorHandle(*inputData,
- m_OutputHandlers[i].GetTensorInfo().GetShape(),
- m_Param.GetViewOrigin(i)));
+ const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();
+
+ auto CreateSubTensor = [&]()
+ {
+ // Make sure quantization parameters are in the same space
+ if (parentInfo.IsTypeSpaceMatch(info))
+ {
+ return factory.CreateSubTensorHandle(*inputData,
+ info.GetShape(),
+ this->m_Param.GetViewOrigin(i));
+ }
+ return std::unique_ptr<ITensorHandle>();
+ };
+
+ auto subTensor = CreateSubTensor();
+ if (!subTensor)
+ {
+ useSubTensors = false;
+ break; //Failed to create a valid sub-tensor, so stop trying with the rest of the views.
+ }
+ subTensors.push_back(std::move(subTensor));
+ }
+
+ if (useSubTensors)
+ {
+ unsigned int i = 0;
+ for (auto& subTensor : subTensors)
+ {
+ m_OutputHandlers[i].SetData(std::move(subTensor));
+ ++i;
+ }
+
}
}
- else
+
+ if (!useSubTensors)
{
for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
{
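The hunk above makes sub-tensor creation all-or-nothing: the lambda only returns a handle when the parent and the view share the same type space, and a single failure discards every sub-tensor so that no output aliases the input while the rest get their own storage. The guard itself is armnn::TensorInfo::IsTypeSpaceMatch; assuming it boils down to the element type and the quantization parameters, it behaves roughly like the sketch below (an approximation, not the library implementation):

    #include <armnn/Tensor.hpp>

    // Rough approximation of the condition guarding CreateSubTensorHandle above.
    bool ViewsCanAliasParent(const armnn::TensorInfo& parent, const armnn::TensorInfo& view)
    {
        return parent.GetDataType()           == view.GetDataType() &&
               parent.GetQuantizationScale()  == view.GetQuantizationScale() &&
               parent.GetQuantizationOffset() == view.GetQuantizationOffset();
    }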
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
index 10e682e36a..6b3c57b0bd 100644
--- a/src/armnnTfLiteParser/test/Unpack.cpp
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -116,4 +116,25 @@ BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxi
{"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
}
+struct DefaultUnpackLastAxisFixture : UnpackFixture
+{
+ DefaultUnpackLastAxisFixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
+{
+ RunTest<2, armnn::DataType::Float32>(
+ 0,
+ { {"inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+ 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } } },
+ { {"outputTensor1", { 1.0f, 7.0f, 13.0f, 19.0f }},
+ {"outputTensor2", { 2.0f, 8.0f, 14.0f, 20.0f }},
+ {"outputTensor3", { 3.0f, 9.0f, 15.0f, 21.0f }},
+ {"outputTensor4", { 4.0f, 10.0f, 16.0f, 22.0f }},
+ {"outputTensor5", { 5.0f, 11.0f, 17.0f, 23.0f }},
+ {"outputTensor6", { 6.0f, 12.0f, 18.0f, 24.0f }} });
+}
+
BOOST_AUTO_TEST_SUITE_END()
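As a sanity check of the new UnpackLastAxisNumSix expectations: the input has shape [ 4, 1, 6 ] with values 1 to 24 in row-major order, so unpacking along axis 2 means output k (1-based) collects every sixth value starting at k. outputTensor1 gets { 1, 7, 13, 19 }, outputTensor2 gets { 2, 8, 14, 20 }, and so on up to outputTensor6 with { 6, 12, 18, 24 }, which is exactly what the test asserts.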
diff --git a/src/armnnTfParser/test/Split.cpp b/src/armnnTfParser/test/Split.cpp
index bf42bf7c5d..10ff04df89 100644
--- a/src/armnnTfParser/test/Split.cpp
+++ b/src/armnnTfParser/test/Split.cpp
@@ -173,4 +173,222 @@ BOOST_FIXTURE_TEST_CASE(ParseSplit, InputFirstSplitFixture)
{ "Relu_2", { 0.25, 9.0f, 0.25f, 3.0625f } } });
}
+struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ SplitLastDimFixture(bool withDimZero=false) {
+ m_Prototext = R"(
+ node {
+ name: "Placeholder"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ dim {
+ size: 2
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 3
+ }
+ }
+ }
+ }
+ node {
+ name: "split/split_dim"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 3
+ }
+ }
+ }
+ }
+ node {
+ name: "split"
+ op: "Split"
+ input: "split/split_dim"
+ input: "Placeholder"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "num_split"
+ value {
+ i: 3
+ }
+ }
+ }
+ node {
+ name: "sub0/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 3.0
+ }
+ }
+ }
+ }
+ node {
+ name: "sub0"
+ op: "Sub"
+ input: "split"
+ input: "sub0/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "sub1/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 2.0
+ }
+ }
+ }
+ }
+ node {
+ name: "sub1"
+ op: "Sub"
+ input: "split:1"
+ input: "sub1/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "sub2/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 1.0
+ }
+ }
+ }
+ }
+ node {
+ name: "sub2"
+ op: "Sub"
+ input: "split:2"
+ input: "sub2/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ versions {
+ producer: 27
+ } )";
+
+ Setup( { { "Placeholder", { 1, 2, 2 , 3} } },
+ { "sub0", "sub1", "sub2" });
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(SplitLastDimTest, SplitLastDimFixture)
+{
+ BOOST_TEST(
+ (m_Parser->GetNetworkOutputBindingInfo("sub0").second.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+
+ BOOST_TEST(
+ (m_Parser->GetNetworkOutputBindingInfo("sub1").second.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+
+ BOOST_TEST(
+ (m_Parser->GetNetworkOutputBindingInfo("sub2").second.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+
+ RunTest<4>({ { "Placeholder", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f } } },
+ { { "sub0", { -2.0f, 1.0f, 4.0f, 7.0f } },
+ { "sub1", { 0.0f, 3.0f, 6.0f, 9.0f } },
+ { "sub2", { 2.0f, 5.0f, 8.0f, 11.0f } } });
+}
+
BOOST_AUTO_TEST_SUITE_END()
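A similar check for SplitLastDimTest: the input has shape [ 1, 2, 2, 3 ], so the last dimension is contiguous and view c of the split along dimension 3 takes every third value starting at value c + 1. View 0 is { 1, 4, 7, 10 } and sub0 subtracts 3.0, giving { -2, 1, 4, 7 }; view 1 is { 2, 5, 8, 11 } minus 2.0, giving { 0, 3, 6, 9 }; view 2 is { 3, 6, 9, 12 } minus 1.0, giving { 2, 5, 8, 11 }, matching the values passed to RunTest.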
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index b4673f7b31..5b8f983ecc 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -9,6 +9,8 @@
#include <arm_compute/core/Types.h>
+#include <boost/assert.hpp>
+
namespace armnn
{
@@ -130,4 +132,23 @@ inline unsigned int ComputeSoftmaxAclAxis(const armnn::TensorInfo& tensor)
return dim - 1;
}
+inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
+{
+ unsigned int numSplit = desc.GetNumViews();
+ unsigned int numDimensions = desc.GetNumDimensions();
+ std::set<unsigned int> splitAxis;
+
+ for (unsigned int i = 0; i < numSplit; ++i)
+ {
+ for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
+ {
+ if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
+ {
+ splitAxis.insert(dimIdx);
+ }
+ }
+ }
+ return splitAxis;
+}
+
} // namespace armnn
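ComputeSplitAxis above marks every dimension in which some view is smaller than the input. For the unpack test earlier, a [ 4, 1, 6 ] input split into six [ 4, 1, 1 ] views differs from the input only in dimension 2, so the result is { 2 }. A self-contained sketch of the same idea with the descriptor replaced by plain vectors (illustrative code, not the ArmNN API):

    #include <set>
    #include <vector>

    // Every dimension in which a view's size differs from the input is a split axis.
    std::set<unsigned int> SplitAxisOf(const std::vector<unsigned int>& inputShape,
                                       const std::vector<unsigned int>& viewShape)
    {
        std::set<unsigned int> axes;
        for (unsigned int d = 0; d < inputShape.size(); ++d)
        {
            if (viewShape[d] != inputShape[d])
            {
                axes.insert(d);
            }
        }
        return axes;
    }

    // SplitAxisOf({ 4, 1, 6 }, { 4, 1, 1 }) yields { 2 }: only the last dimension
    // changes, which is the case the NEON backend now routes to arm_compute::NESplit.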
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 7760c079ac..9fcb496ba3 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -401,6 +401,14 @@ bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 88d5792819..75527584ed 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -247,10 +247,16 @@ public:
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 9679c35acb..0490a94864 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -703,7 +703,20 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
{
auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+ // Get vector of all outputs.
+ auto getTensorInfo = [&dataType](const OutputSlot& slot)
+ {
+ return OverrideDataType(slot.GetTensorInfo(), dataType);
+ };
+ auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
+ auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
+ std::vector<TensorInfo> outputs(beginI, endI);
+
+ const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
+
result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
+ outputPtrs,
cLayer->GetParameters(),
reason);
break;
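The WorkloadFactory change above first materialises the output TensorInfos into a local std::vector<TensorInfo> and then builds a std::vector<std::reference_wrapper<TensorInfo>> over it, so the new IsSplitterSupported overload can see every output without a second copy. The pattern in isolation, with a stand-in type (the lifetime caveat is the important part):

    #include <functional>
    #include <vector>

    struct Info { int value; };

    void Example()
    {
        std::vector<Info> owned = { {1}, {2}, {3} };

        // The wrappers alias 'owned'; they stay valid only while 'owned' is alive
        // and not reallocated, which holds here because both live in this scope.
        std::vector<std::reference_wrapper<Info>> refs(owned.begin(), owned.end());

        refs[0].get().value = 42; // writes through to owned[0]
    }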
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 2ce5179045..21d191ab2c 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -607,6 +607,19 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
&TrueFunc<>);
}
+bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(descriptor);
+ ignore_unused(outputs);
+ return IsSupportedForDataTypeCl(reasonIfUnsupported,
+ input.GetDataType(),
+ &TrueFunc<>,
+ &TrueFunc<>);
+}
+
bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index b634d46768..fca0bfd352 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -200,10 +200,16 @@ public:
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index f4599ff8e4..fd9aac5bc5 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -17,6 +17,7 @@
#include <boost/core/ignore_unused.hpp>
#if defined(ARMCOMPUTENEON_ENABLED)
+#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
@@ -36,6 +37,7 @@
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonResizeBilinearWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
+#include "workloads/NeonSplitterWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#endif
@@ -478,6 +480,38 @@ bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
&TrueFunc<>);
}
+bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+#if defined(ARMCOMPUTENEON_ENABLED)
+ // Split along the last dimension, cannot use sub-tensors
+ // as width and height of the sub-tensors do not match
+ // the width and height of the parent tensor
+ // in case of input with more than 2D.
+ std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
+ if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
+ *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ outputs,
+ *splitAxis.begin());
+ }
+#endif
+ for (auto output : outputs)
+ {
+ if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
+ return false;
+ }
+ }
+ return true;
+}
+
bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
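For orientation, a few illustrative shapes and the path the new NEON overload takes (assuming data types and quantization parameters match between input and views):

    input shape      views               split axes   path
    [ 4, 6 ]         2 x [ 4, 3 ]        { 1 }        sub-tensors (input has only 2 dimensions)
    [ 1, 2, 2, 3 ]   3 x [ 1, 2, 2, 1 ]  { 3 }        NeonSplitterWorkloadValidate / NESplit (last axis of a >2D tensor)
    [ 1, 4, 2, 2 ]   2 x [ 1, 2, 2, 2 ]  { 1 }        sub-tensors (split axis is not the last dimension)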
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 8312bb977a..5e8e0bdbed 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -158,10 +158,16 @@ public:
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 6824879ac9..6931bd7325 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -46,6 +46,7 @@ BACKEND_SOURCES := \
workloads/NeonSoftmaxBaseWorkload.cpp \
workloads/NeonSoftmaxFloatWorkload.cpp \
workloads/NeonSoftmaxUint8Workload.cpp \
+ workloads/NeonSplitterWorkload.cpp \
workloads/NeonSubtractionWorkload.cpp
else
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index f1c773dc4c..8b2ad63f45 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -58,6 +58,7 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonSoftmaxFloatWorkload.hpp
NeonSoftmaxUint8Workload.cpp
NeonSoftmaxUint8Workload.hpp
+ NeonSplitterWorkload.cpp
NeonSplitterWorkload.hpp
NeonSubtractionWorkload.cpp
NeonSubtractionWorkload.hpp
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
new file mode 100644
index 0000000000..bf35939127
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSplitterWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+namespace
+{
+unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
+{
+ return (numDimensions - splitAxis) - 1;
+}
+
+} //namespace
+
+arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ unsigned int splitAxis)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+
+ size_t numOutputs = outputs.size();
+
+ std::vector<arm_compute::TensorInfo> aclOutputs;
+ aclOutputs.reserve(numOutputs);
+
+ std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
+ aclOutputPtr.reserve(numOutputs);
+
+ for (size_t i = 0u; i < outputs.size(); ++i)
+ {
+ aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
+ aclOutputPtr.emplace_back(&aclOutputs.back());
+ }
+
+ unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
+ return arm_compute::NESplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
+}
+
+NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<SplitterQueueDescriptor>(descriptor, info)
+{
+ bool allOutputsAreSubtensors = true;
+
+ // Check that all outputs are sub-tensors
+ for (auto output : m_Data.m_Outputs)
+ {
+ if (output && !output->GetParent())
+ {
+ // Non sub-tensor input found so we need to execute the split function
+ allOutputsAreSubtensors = false;
+ break;
+ }
+ }
+
+ if (allOutputsAreSubtensors)
+ {
+ // Can skip configuring the split function since it's not executed
+ return;
+ }
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+
+ std::vector<arm_compute::ITensor *> aclOutputs;
+ for (auto output : m_Data.m_Outputs)
+ {
+ arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(output)->GetTensor();
+ aclOutputs.emplace_back(&aclOutput);
+ }
+
+ // Create the layer function
+ m_Layer.reset(new arm_compute::NESplit());
+
+ // Configure input and output tensors
+ std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
+ if (splitAxis.size() != 1)
+ {
+ throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
+ }
+
+ unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
+ m_Layer->configure(&input, aclOutputs, aclAxis);
+
+ // Prepare
+ m_Layer->prepare();
+}
+
+void NeonSplitterWorkload::Execute() const
+{
+ if (m_Layer)
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSplitterWorkload_Execute");
+ m_Layer->run();
+ }
+}
+
+} //namespace armnn
+
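CalcAclAxis in the new workload translates the split axis because ArmNN numbers dimensions from the outermost one while the Compute Library numbers them from the innermost. A worked example of that mapping (repeating the helper only to show the arithmetic):

    // ArmNN axis -> ACL axis for a tensor with numDimensions dimensions.
    unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
    {
        return (numDimensions - splitAxis) - 1;
    }

    // For the 4D test case, a split on ArmNN axis 3 (the innermost dimension)
    // becomes ACL axis 0: (4 - 3) - 1 == 0.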
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.hpp b/src/backends/neon/workloads/NeonSplitterWorkload.hpp
index 2a7ee193d0..f9025663ca 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.hpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.hpp
@@ -7,18 +7,26 @@
#include <backendsCommon/Workload.hpp>
+#include <arm_compute/runtime/NEON/NEFunctions.h>
+
+#include <functional>
+
namespace armnn
{
+arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ unsigned int splitAxis);
+
class NeonSplitterWorkload : public BaseWorkload<SplitterQueueDescriptor>
{
public:
- using BaseWorkload<SplitterQueueDescriptor>::BaseWorkload;
+ NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ void Execute() const override;
- virtual void Execute() const override
- {
- // With subtensors, splitter is a no-op.
- }
+private:
+ mutable std::unique_ptr<arm_compute::NESplit> m_Layer;
};
} //namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 7beff72dad..6ad6816474 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -998,6 +998,19 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
&TrueFunc<>);
}
+bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(descriptor);
+ ignore_unused(outputs);
+ return IsSupportedForDataTypeRef(reasonIfUnsupported,
+ input.GetDataType(),
+ &TrueFunc<>,
+ &TrueFunc<>);
+}
+
bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index a4ae01e403..944061d5a6 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -236,10 +236,16 @@ public:
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,