aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNikhil Raj Arm <nikhil.raj@arm.com>2023-08-11 08:55:21 +0000
committerNikhil Raj Arm <nikhil.raj@arm.com>2023-08-11 08:55:21 +0000
commit008270f8c1359a7d62c2f881326b4d3f0d8b7b56 (patch)
tree5496cc6b428e1ecc0740aaf62dba331c77a9a9eb
parent4980e21193f0a14fef084a7f4b4197392f3c0845 (diff)
downloadarmnn-008270f8c1359a7d62c2f881326b4d3f0d8b7b56.tar.gz
Revert "MLCE-1093 Reshape and concat invalid results"
This reverts commit 4980e21193f0a14fef084a7f4b4197392f3c0845.

Reason for revert: the Android build for v8.2a failed because the FlatBuffers schema was not re-generated.

Change-Id: Ic19cf471b487f321c97ff837d36526512fb12fa4
-rw-r--r--delegate/classic/src/Split.hpp4
-rw-r--r--delegate/classic/src/Unpack.hpp2
-rw-r--r--delegate/opaque/src/Split.hpp4
-rw-r--r--delegate/opaque/src/Unpack.hpp2
-rw-r--r--src/armnn/Descriptors.cpp8
-rw-r--r--src/armnnDeserializer/Deserializer.cpp5
-rw-r--r--src/armnnSerializer/Serializer.cpp4
-rw-r--r--src/armnnSerializer/test/SerializerTests.cpp1
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.cpp8
-rw-r--r--src/backends/aclCommon/ArmComputeUtils.hpp16
-rw-r--r--src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp49
-rw-r--r--src/backends/cl/ClTensorHandleFactory.cpp2
-rw-r--r--src/backends/neon/NeonTensorHandleFactory.cpp2
-rw-r--r--src/backends/neon/test/NeonTensorHandleTests.cpp283
14 files changed, 292 insertions, 98 deletions
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
index 57b7f8074e..aaa610259f 100644
--- a/delegate/classic/src/Split.hpp
+++ b/delegate/classic/src/Split.hpp
@@ -107,8 +107,6 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
splitterDimSizes[splitDim] /= numSplits;
armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
- splitDescriptor.SetAxis(axis);
-
for (unsigned int j = 0; j < numSplits; ++j)
{
// Set the size of the views.
@@ -303,8 +301,6 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
}
armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
- splitDescriptor.SetAxis(axis);
-
unsigned int accumSplit = 0;
for (unsigned int j = 0; j < numSplits; ++j)
{
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
index b3336ec990..2cd32564c1 100644
--- a/delegate/classic/src/Unpack.hpp
+++ b/delegate/classic/src/Unpack.hpp
@@ -96,8 +96,6 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
unpackDimSizes[unpackAxis] /= unpackNum;
armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
- splitDesc.SetAxis(unpackAxis);
-
for (unsigned int j = 0; j < unpackNum; ++j)
{
// Set the size of the views.
diff --git a/delegate/opaque/src/Split.hpp b/delegate/opaque/src/Split.hpp
index 199f46b126..2dbfa602fb 100644
--- a/delegate/opaque/src/Split.hpp
+++ b/delegate/opaque/src/Split.hpp
@@ -134,8 +134,6 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
splitterDimSizes[splitDim] /= numSplits;
armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
- splitDescriptor.SetAxis(axis);
-
for (int j = 0; j < numSplits; ++j)
{
// Set the size of the views.
@@ -369,9 +367,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
}
armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
- splitDescriptor.SetAxis(axis);
unsigned int accumSplit = 0;
-
for (int j = 0; j < numSplits; ++j)
{
unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);
diff --git a/delegate/opaque/src/Unpack.hpp b/delegate/opaque/src/Unpack.hpp
index 525529ff7b..0956d1688e 100644
--- a/delegate/opaque/src/Unpack.hpp
+++ b/delegate/opaque/src/Unpack.hpp
@@ -96,8 +96,6 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
unpackDimSizes[unpackAxis] /= unpackNum;
armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
- splitDesc.SetAxis(unpackAxis);
-
for (unsigned int j = 0; j < unpackNum; ++j)
{
// Set the size of the views.
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 5e4628bd77..e6374aea8f 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -216,16 +216,12 @@ void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int
ViewsDescriptor::ViewsDescriptor()
: m_Origins()
, m_ViewSizes(nullptr)
-, m_IsAxisSet(false)
-, m_Axis(0)
{}
ViewsDescriptor::ViewsDescriptor(uint32_t numViews, uint32_t numDimensions /*= 4*/)
: m_Origins(numViews, numDimensions)
, m_ViewSizes(numViews > 0 && numDimensions > 0 ?
new uint32_t *[numViews]() : nullptr)
- , m_IsAxisSet(false)
- , m_Axis(0)
{
if (m_ViewSizes)
{
@@ -240,8 +236,6 @@ ViewsDescriptor::ViewsDescriptor(const ViewsDescriptor& other)
: m_Origins(other.m_Origins)
, m_ViewSizes(other.GetNumViews() > 0 && other.GetNumDimensions() > 0 ?
new uint32_t *[other.GetNumViews()]() : nullptr)
- , m_IsAxisSet(other.m_IsAxisSet)
- , m_Axis(other.m_Axis)
{
if (m_ViewSizes)
{
@@ -367,8 +361,6 @@ void swap(ViewsDescriptor& first, ViewsDescriptor& second)
using std::swap;
swap(first.m_Origins, second.m_Origins);
swap(first.m_ViewSizes, second.m_ViewSizes);
- swap(first.m_IsAxisSet, second.m_IsAxisSet);
- swap(first.m_Axis, second.m_Axis);
}
void ViewsDescriptor::SetAxis(int32_t axis)
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 8ca1e83dfe..eb77f92842 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -3228,11 +3228,6 @@ void IDeserializer::DeserializerImpl::ParseSplitter(GraphPtr graph, unsigned int
}
}
- if (flatBufferViewsDescriptor->hasAxis())
- {
- viewsDescriptor.SetAxis(flatBufferViewsDescriptor->axis());
- }
-
auto layerName = GetLayerName(graph, layerIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(viewsDescriptor, layerName.c_str());
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 1b7e46e51a..6cadb598a2 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1306,9 +1306,7 @@ void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer*
// Create FlatBuffer ViewsDescriptor
auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
flatBufferOriginDescriptor,
- m_flatBufferBuilder.CreateVector(flatBufferViewSizes),
- viewsDescriptor.HasAxis(),
- viewsDescriptor.GetAxis());
+ m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index bfe3fc6467..b2590eaa42 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -2644,7 +2644,6 @@ TEST_CASE("SerializeSplitter")
desc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
}
}
- desc.SetAxis(1);
const std::string layerName("splitter");
const armnn::TensorInfo inputInfo(numDimensions, inputShape, armnn::DataType::Float32);
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index f40f7ffe0e..301989ebb7 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -4213,7 +4213,7 @@ void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
}
splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
}
- splitDesc.SetAxis(unpackAxis);
+
auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
@@ -4352,10 +4352,7 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
}
splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
}
- if (axisTensorInfo.GetNumElements() == 1)
- {
- splitDesc.SetAxis(axis);
- }
+
auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
@@ -4534,7 +4531,6 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
accumSplit += splitSize;
}
- splitDesc.SetAxis(axis);
auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 9552b7620a..fc59b281b5 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -9,7 +9,6 @@
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/backends/WorkloadData.hpp>
-#include <armnnUtils/TensorUtils.hpp>
#include <arm_compute/runtime/FunctionDescriptors.h>
#include <arm_compute/function_info/FullyConnectedLayerInfo.h>
@@ -248,20 +247,13 @@ inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor&
unsigned int numDimensions = desc.GetNumDimensions();
std::set<unsigned int> splitAxis;
- if (desc.HasAxis())
+ for (unsigned int i = 0; i < numSplit; ++i)
{
- splitAxis.insert(armnnUtils::GetUnsignedAxis(desc.GetNumDimensions(), desc.GetAxis()));
- }
- else
- {
- for (unsigned int i = 0; i < numSplit; ++i)
+ for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
{
- for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
+ if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
{
- if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
- {
- splitAxis.insert(dimIdx);
- }
+ splitAxis.insert(dimIdx);
}
}
}
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index 13483e5ebd..9e3d83c0f4 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2020,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -199,29 +199,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
// Do the first split
armnn::SplitterQueueDescriptor data;
- data.m_Parameters = armnn::SplitterDescriptor(2, 3);
-
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
- data.m_Parameters.SetViewSize(0, 0, outputChannels1);
- data.m_Parameters.SetViewSize(0, 1, outputHeight1);
- data.m_Parameters.SetViewSize(0, 2, outputWidth1);
-
- data.m_Parameters.SetViewSize(1, 0, outputChannels2);
- data.m_Parameters.SetViewSize(1, 1, outputHeight2);
- data.m_Parameters.SetViewSize(1, 2, outputWidth2);
-
- data.m_Parameters.SetViewOriginCoord(0, 0, 0);
- data.m_Parameters.SetViewOriginCoord(0, 1, 0);
- data.m_Parameters.SetViewOriginCoord(0, 2, 0);
-
- data.m_Parameters.SetViewOriginCoord(1, 0, 1);
- data.m_Parameters.SetViewOriginCoord(1, 1, 0);
- data.m_Parameters.SetViewOriginCoord(1, 2, 0);
-
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
@@ -242,29 +224,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
// Do the second split.
armnn::SplitterQueueDescriptor data2;
- data2.m_Parameters = armnn::SplitterDescriptor(2, 3);
-
armnn::WorkloadInfo info2;
AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
- data2.m_Parameters.SetViewSize(0, 0, outputChannels1);
- data2.m_Parameters.SetViewSize(0, 1, outputHeight1);
- data2.m_Parameters.SetViewSize(0, 2, outputWidth1);
-
- data2.m_Parameters.SetViewSize(1, 0, outputChannels2);
- data2.m_Parameters.SetViewSize(1, 1, outputHeight2);
- data2.m_Parameters.SetViewSize(1, 2, outputWidth1);
-
- data2.m_Parameters.SetViewOriginCoord(0, 0, 0);
- data2.m_Parameters.SetViewOriginCoord(0, 1, 0);
- data2.m_Parameters.SetViewOriginCoord(0, 2, 0);
-
- data2.m_Parameters.SetViewOriginCoord(1, 0, 1);
- data2.m_Parameters.SetViewOriginCoord(1, 1, 0);
- data2.m_Parameters.SetViewOriginCoord(1, 2, 0);
-
data2.m_ViewOrigins.push_back(window3);
data2.m_ViewOrigins.push_back(window4);
@@ -343,17 +307,6 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
- data.m_Parameters = armnn::SplitterDescriptor(1, 3);
- data.m_Parameters.SetAxis(0);
-
- data.m_Parameters.SetViewSize(0, 0, 3);
- data.m_Parameters.SetViewSize(0, 1, 6);
- data.m_Parameters.SetViewSize(0, 2, 5);
-
- data.m_Parameters.SetViewOriginCoord(0, 0, 0);
- data.m_Parameters.SetViewOriginCoord(0, 1, 0);
- data.m_Parameters.SetViewOriginCoord(0, 2, 0);
-
data.m_ViewOrigins.push_back(window);
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index be3ca5e05a..82e41d3ff6 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -103,7 +103,7 @@ const FactoryId& ClTensorHandleFactory::GetId() const
bool ClTensorHandleFactory::SupportsSubTensors() const
{
- return false;
+ return true;
}
MemorySourceFlags ClTensorHandleFactory::GetExportFlags() const
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index 2597b5f28b..ce3ce5c0d7 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -104,7 +104,7 @@ bool NeonTensorHandleFactory::SupportsInPlaceComputation() const
bool NeonTensorHandleFactory::SupportsSubTensors() const
{
- return false;
+ return true;
}
MemorySourceFlags NeonTensorHandleFactory::GetExportFlags() const
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index a94e4dd187..c8e781b71d 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Graph.hpp>
@@ -89,6 +89,81 @@ TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
CHECK(capabilities[0].m_Value);
}
+TEST_CASE("ConcatOnXorYSubTensorsNoPaddingRequiredTest")
+{
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ // Set up tensor infos
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
+ const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({2, 3, 4, 2}, armnn::DataType::Float32);
+
+ armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
+
+ // Create the network
+ armnn::IConnectableLayer* const input0Layer = net->AddInputLayer(0, "input_0");
+ input0Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ armnn::IConnectableLayer* elementwiseUnaryLayer0 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary_0");
+ elementwiseUnaryLayer0->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
+ input0Layer->GetOutputSlot(0).Connect(elementwiseUnaryLayer0->GetInputSlot(0));
+
+ armnn::IConnectableLayer* const input1Layer = net->AddInputLayer(1, "input_1");
+ input1Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ armnn::IConnectableLayer* elementwiseUnaryLayer1 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary_1");
+ elementwiseUnaryLayer1->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
+ input1Layer->GetOutputSlot(0).Connect(elementwiseUnaryLayer1->GetInputSlot(0));
+
+ std::array<armnn::TensorShape, 2> concatInputShapes = { intermediateInfo.GetShape(), intermediateInfo.GetShape() };
+ armnn::IConnectableLayer* const concatLayer = net->AddConcatLayer(armnn::CreateDescriptorForConcatenation(
+ concatInputShapes.begin(), concatInputShapes.end(), 2), "concatenation");
+ concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+ elementwiseUnaryLayer0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ elementwiseUnaryLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
+
+ armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output");
+ concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
+
+ // Load graph into runtime
+ armnn::NetworkId networkIdentifier;
+ runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
+
+ // now check the concat how many sub-tensors it is using..
+ auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
+ {
+ if (subTensorHandle && subTensorHandle->GetParent())
+ {
+ return true;
+ }
+ return false;
+ };
+
+ for (auto&& layer : theGraph)
+ {
+ if(layer->GetType() == armnn::LayerType::Concat)
+ {
+ unsigned int numberOfSubTensors = 0;
+ for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+ {
+ const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
+ if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
+ {
+ ++numberOfSubTensors;
+ }
+ }
+ // sub-tensors should be supported in this configuration
+ ARMNN_ASSERT(numberOfSubTensors > 0);
+ }
+ }
+}
+
TEST_CASE("ConcatonXorYPaddingRequiredTest")
{
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -172,6 +247,212 @@ TEST_CASE("ConcatonXorYPaddingRequiredTest")
ARMNN_ASSERT(numberOfSubTensors == 0);
}
+TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
+{
+ using namespace armnn;
+
+ unsigned int splitAxis = 2;
+ unsigned int numSplit = 2;
+
+ const TensorShape& inputShape = { 2, 3, 4, 2 };
+ const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32);
+ const std::vector<TensorShape> outputShapes{{ 2, 3, 2, 2 },
+ { 2, 3, 2, 2 }};
+ const float qScale = 1.0f;
+ const int32_t qOffset = 0;
+
+ // Creates structures for input & output.
+ std::vector<float> inputData{
+ 1, 2,
+ 3, 4,
+ 5, 6,
+ 7, 8,
+ 9, 10,
+ 11, 12,
+ 13, 14,
+ 15, 16,
+ 17, 18,
+ 19, 20,
+ 21, 22,
+ 23, 24,
+ 25, 26,
+ 27, 28,
+ 29, 30,
+ 31, 32,
+ 33, 34,
+ 35, 36,
+ 37, 38,
+ 39, 40,
+ 41, 42,
+ 43, 44,
+ 45, 46,
+ 47, 48
+ };
+
+ std::vector<float> expectedOutput0{
+ 1, 2,
+ 3, 4,
+ 9, 10,
+ 11, 12,
+ 17, 18,
+ 19, 20,
+ 25, 26,
+ 27, 28,
+ 33, 34,
+ 35, 36,
+ 41, 42,
+ 43, 44
+ };
+
+ std::vector<float> expectedOutput1{
+ 5, 6,
+ 7, 8,
+ 13, 14,
+ 15, 16,
+ 21, 22,
+ 23, 24,
+ 29, 30,
+ 31, 32,
+ 37, 38,
+ 39, 40,
+ 45, 46,
+ 47, 48
+ };
+
+ // Builds up the structure of the network.
+ INetworkPtr net(INetwork::Create());
+
+ TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32, qScale, qOffset);
+
+ armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
+
+ // Splitter
+ std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
+
+ // Add current input shape to splitterDimSizes
+ for (unsigned int i = 0; i < inputShape.GetNumDimensions(); ++i)
+ {
+ splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
+ }
+
+ if (splitterDimSizes[splitAxis] % numSplit != 0)
+ {
+ throw ParseException("Number of splits must evenly divide the dimension");
+ }
+
+ splitterDimSizes[splitAxis] /= numSplit;
+
+ SplitterDescriptor splitDesc(numSplit, inputShape.GetNumDimensions());
+
+ for (unsigned int g = 0; g < numSplit; ++g)
+ {
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+ {
+ splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
+ }
+ splitDesc.SetViewOriginCoord(g, splitAxis, splitterDimSizes[splitAxis] * g);
+ }
+ IConnectableLayer* input = net->AddInputLayer(0, "input");
+ IConnectableLayer* elementWiseUnary0 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseunary_0");
+ IConnectableLayer* elementWiseUnary1 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseunary_0");
+ IConnectableLayer* splitter = net->AddSplitterLayer(splitDesc, "splitter");
+
+ // Connections
+ Connect(input, splitter, inputTensorInfo, 0, 0);
+ Connect(splitter, elementWiseUnary0, intermediateInfo, 0, 0);
+ Connect(splitter, elementWiseUnary1, intermediateInfo, 1, 0);
+
+ std::vector<IConnectableLayer*> pooling2dLayers{elementWiseUnary0, elementWiseUnary1};
+
+ for (unsigned int i = 0; i < outputShapes.size(); ++i)
+ {
+ TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
+ IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
+ Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
+ }
+
+ std::map<int, std::vector<float>> inputTensorData = {{ 0,inputData }};
+ std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
+
+ // Load graph into runtime
+ armnn::NetworkId networkIdentifier;
+ runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
+
+ // now check the concat how many sub-tensors it is using..
+ auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
+ {
+ if (subTensorHandle && subTensorHandle->GetParent())
+ {
+ return true;
+ }
+ return false;
+ };
+
+ for (auto&& layer : theGraph)
+ {
+ if(layer->GetType() == armnn::LayerType::ElementwiseUnary)
+ {
+ unsigned int numberOfSubTensors = 0;
+ for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+ {
+ const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
+ if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
+ {
+ ++numberOfSubTensors;
+ }
+ }
+ // sub-tensors should be supported in this configuration
+ ARMNN_ASSERT(numberOfSubTensors > 0);
+ }
+ }
+
+ InputTensors inputTensors;
+ inputTensors.reserve(inputTensorData.size());
+ for (auto&& it : inputTensorData)
+ {
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
+ inputTensorInfo.SetConstant(true);
+ inputTensors.push_back({it.first,
+ ConstTensor(inputTensorInfo, it.second.data())});
+ }
+ OutputTensors outputTensors;
+ outputTensors.reserve(expectedOutputData.size());
+ std::map<int, std::vector<float>> outputStorage;
+ for (auto&& it : expectedOutputData)
+ {
+ std::vector<float> out(it.second.size());
+ outputStorage.emplace(it.first, out);
+ outputTensors.push_back({it.first,
+ Tensor(runtime->GetOutputTensorInfo(networkIdentifier, it.first),
+ outputStorage.at(it.first).data())});
+ }
+
+ // Does the inference.
+ runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
+ // Checks the results.
+ float tolerance = 0.000001f;
+ for (auto&& it : expectedOutputData)
+ {
+ std::vector<float> out = outputStorage.at(it.first);
+ for (unsigned int i = 0; i < out.size(); ++i)
+ {
+ CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+ "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
+
+ }
+ }
+}
+
TEST_CASE("SplitteronXorYPaddingRequiredTest")
{
using namespace armnn;