From 008270f8c1359a7d62c2f881326b4d3f0d8b7b56 Mon Sep 17 00:00:00 2001
From: Nikhil Raj Arm
Date: Fri, 11 Aug 2023 08:55:21 +0000
Subject: Revert "MLCE-1093 Reshape and concat invalid results"

This reverts commit 4980e21193f0a14fef084a7f4b4197392f3c0845.

Reason for revert: the Android build for v82a failed because the schema was not re-generated.

Change-Id: Ic19cf471b487f321c97ff837d36526512fb12fa4
---
 src/armnn/Descriptors.cpp                        |   8 -
 src/armnnDeserializer/Deserializer.cpp           |   5 -
 src/armnnSerializer/Serializer.cpp               |   4 +-
 src/armnnSerializer/test/SerializerTests.cpp     |   1 -
 src/armnnTfLiteParser/TfLiteParser.cpp           |   8 +-
 src/backends/aclCommon/ArmComputeUtils.hpp       |  16 +-
 .../test/layerTests/SplitterTestImpl.cpp         |  49 +---
 src/backends/cl/ClTensorHandleFactory.cpp        |   2 +-
 src/backends/neon/NeonTensorHandleFactory.cpp    |   2 +-
 src/backends/neon/test/NeonTensorHandleTests.cpp | 283 ++++++++++++++++++++-
 10 files changed, 292 insertions(+), 86 deletions(-)

diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 5e4628bd77..e6374aea8f 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -216,16 +216,12 @@ void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int
 ViewsDescriptor::ViewsDescriptor()
 : m_Origins()
 , m_ViewSizes(nullptr)
-, m_IsAxisSet(false)
-, m_Axis(0)
 {}
 
 ViewsDescriptor::ViewsDescriptor(uint32_t numViews, uint32_t numDimensions /*= 4*/)
     : m_Origins(numViews, numDimensions)
     , m_ViewSizes(numViews > 0 && numDimensions > 0 ?
                   new uint32_t *[numViews]() : nullptr)
-    , m_IsAxisSet(false)
-    , m_Axis(0)
 {
     if (m_ViewSizes)
     {
@@ -240,8 +236,6 @@ ViewsDescriptor::ViewsDescriptor(const ViewsDescriptor& other)
     : m_Origins(other.m_Origins)
     , m_ViewSizes(other.GetNumViews() > 0 && other.GetNumDimensions() > 0 ?
new uint32_t *[other.GetNumViews()]() : nullptr) - , m_IsAxisSet(other.m_IsAxisSet) - , m_Axis(other.m_Axis) { if (m_ViewSizes) { @@ -367,8 +361,6 @@ void swap(ViewsDescriptor& first, ViewsDescriptor& second) using std::swap; swap(first.m_Origins, second.m_Origins); swap(first.m_ViewSizes, second.m_ViewSizes); - swap(first.m_IsAxisSet, second.m_IsAxisSet); - swap(first.m_Axis, second.m_Axis); } void ViewsDescriptor::SetAxis(int32_t axis) diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 8ca1e83dfe..eb77f92842 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -3228,11 +3228,6 @@ void IDeserializer::DeserializerImpl::ParseSplitter(GraphPtr graph, unsigned int } } - if (flatBufferViewsDescriptor->hasAxis()) - { - viewsDescriptor.SetAxis(flatBufferViewsDescriptor->axis()); - } - auto layerName = GetLayerName(graph, layerIndex); IConnectableLayer* layer = m_Network->AddSplitterLayer(viewsDescriptor, layerName.c_str()); diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 1b7e46e51a..6cadb598a2 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -1306,9 +1306,7 @@ void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* // Create FlatBuffer ViewsDescriptor auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder, flatBufferOriginDescriptor, - m_flatBufferBuilder.CreateVector(flatBufferViewSizes), - viewsDescriptor.HasAxis(), - viewsDescriptor.GetAxis()); + m_flatBufferBuilder.CreateVector(flatBufferViewSizes)); // Create FlatBuffer BaseLayer auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter); diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index bfe3fc6467..b2590eaa42 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -2644,7 +2644,6 @@ TEST_CASE("SerializeSplitter") desc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]); } } - desc.SetAxis(1); const std::string layerName("splitter"); const armnn::TensorInfo inputInfo(numDimensions, inputShape, armnn::DataType::Float32); diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index f40f7ffe0e..301989ebb7 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -4213,7 +4213,7 @@ void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex) } splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j); } - splitDesc.SetAxis(unpackAxis); + auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str()); @@ -4352,10 +4352,7 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex) } splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j); } - if (axisTensorInfo.GetNumElements() == 1) - { - splitDesc.SetAxis(axis); - } + auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str()); @@ -4534,7 +4531,6 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex) splitDesc.SetViewOriginCoord(j, splitDim, accumSplit); accumSplit += splitSize; } - splitDesc.SetAxis(axis); auto layerName = fmt::format("SplitV:{}:{}", 
                                 subgraphIndex, operatorIndex);
     IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
 
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 9552b7620a..fc59b281b5 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -9,7 +9,6 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/utility/NumericCast.hpp>
-#include <armnnUtils/TensorUtils.hpp>
 #include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/FunctionDescriptors.h>
 
@@ -248,20 +247,13 @@ inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor&
     unsigned int numDimensions = desc.GetNumDimensions();
     std::set<unsigned int> splitAxis;
 
-    if (desc.HasAxis())
+    for (unsigned int i = 0; i < numSplit; ++i)
     {
-        splitAxis.insert(armnnUtils::GetUnsignedAxis(desc.GetNumDimensions(), desc.GetAxis()));
-    }
-    else
-    {
-        for (unsigned int i = 0; i < numSplit; ++i)
+        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
         {
-            for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
+            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
             {
-                if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
-                {
-                    splitAxis.insert(dimIdx);
-                }
+                splitAxis.insert(dimIdx);
             }
         }
     }
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index 13483e5ebd..9e3d83c0f4 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2020,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -199,29 +199,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
     // Do the first split
     armnn::SplitterQueueDescriptor data;
-    data.m_Parameters = armnn::SplitterDescriptor(2, 3);
-
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
     AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
 
-    data.m_Parameters.SetViewSize(0, 0, outputChannels1);
-    data.m_Parameters.SetViewSize(0, 1, outputHeight1);
-    data.m_Parameters.SetViewSize(0, 2, outputWidth1);
-
-    data.m_Parameters.SetViewSize(1, 0, outputChannels2);
-    data.m_Parameters.SetViewSize(1, 1, outputHeight2);
-    data.m_Parameters.SetViewSize(1, 2, outputWidth2);
-
-    data.m_Parameters.SetViewOriginCoord(0, 0, 0);
-    data.m_Parameters.SetViewOriginCoord(0, 1, 0);
-    data.m_Parameters.SetViewOriginCoord(0, 2, 0);
-
-    data.m_Parameters.SetViewOriginCoord(1, 0, 1);
-    data.m_Parameters.SetViewOriginCoord(1, 1, 0);
-    data.m_Parameters.SetViewOriginCoord(1, 2, 0);
-
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
@@ -242,29 +224,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
     // Do the second split.
     armnn::SplitterQueueDescriptor data2;
-    data2.m_Parameters = armnn::SplitterDescriptor(2, 3);
-
     armnn::WorkloadInfo info2;
     AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
     AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
     AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
 
-    data2.m_Parameters.SetViewSize(0, 0, outputChannels1);
-    data2.m_Parameters.SetViewSize(0, 1, outputHeight1);
-    data2.m_Parameters.SetViewSize(0, 2, outputWidth1);
-
-    data2.m_Parameters.SetViewSize(1, 0, outputChannels2);
-    data2.m_Parameters.SetViewSize(1, 1, outputHeight2);
-    data2.m_Parameters.SetViewSize(1, 2, outputWidth1);
-
-    data2.m_Parameters.SetViewOriginCoord(0, 0, 0);
-    data2.m_Parameters.SetViewOriginCoord(0, 1, 0);
-    data2.m_Parameters.SetViewOriginCoord(0, 2, 0);
-
-    data2.m_Parameters.SetViewOriginCoord(1, 0, 1);
-    data2.m_Parameters.SetViewOriginCoord(1, 1, 0);
-    data2.m_Parameters.SetViewOriginCoord(1, 2, 0);
-
     data2.m_ViewOrigins.push_back(window3);
     data2.m_ViewOrigins.push_back(window4);
 
@@ -343,17 +307,6 @@ LayerTestResult<T,3> CopyViaSplitterTestImpl(
     AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
 
-    data.m_Parameters = armnn::SplitterDescriptor(1, 3);
-    data.m_Parameters.SetAxis(0);
-
-    data.m_Parameters.SetViewSize(0, 0, 3);
-    data.m_Parameters.SetViewSize(0, 1, 6);
-    data.m_Parameters.SetViewSize(0, 2, 5);
-
-    data.m_Parameters.SetViewOriginCoord(0, 0, 0);
-    data.m_Parameters.SetViewOriginCoord(0, 1, 0);
-    data.m_Parameters.SetViewOriginCoord(0, 2, 0);
-
     data.m_ViewOrigins.push_back(window);
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index be3ca5e05a..82e41d3ff6 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -103,7 +103,7 @@ const FactoryId& ClTensorHandleFactory::GetId() const
 
 bool ClTensorHandleFactory::SupportsSubTensors() const
 {
-    return false;
+    return true;
 }
 
 MemorySourceFlags ClTensorHandleFactory::GetExportFlags() const
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index 2597b5f28b..ce3ce5c0d7 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -104,7 +104,7 @@ bool NeonTensorHandleFactory::SupportsInPlaceComputation() const
 
 bool NeonTensorHandleFactory::SupportsSubTensors() const
 {
-    return false;
+    return true;
 }
 
 MemorySourceFlags NeonTensorHandleFactory::GetExportFlags() const
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index a94e4dd187..c8e781b71d 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include
@@ -89,6 +89,81 @@ TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
     CHECK(capabilities[0].m_Value);
 }
 
+TEST_CASE("ConcatOnXorYSubTensorsNoPaddingRequiredTest")
+{
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    // Set up tensor infos
+    const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
+    const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo({2, 3, 4, 2}, armnn::DataType::Float32);
+
+    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
+
+    // Create the network
+    armnn::IConnectableLayer* const input0Layer = net->AddInputLayer(0, "input_0");
+    input0Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    armnn::IConnectableLayer* elementwiseUnaryLayer0 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary_0");
+    elementwiseUnaryLayer0->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
+    input0Layer->GetOutputSlot(0).Connect(elementwiseUnaryLayer0->GetInputSlot(0));
+
+    armnn::IConnectableLayer* const input1Layer = net->AddInputLayer(1, "input_1");
+    input1Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    armnn::IConnectableLayer* elementwiseUnaryLayer1 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary_1");
+    elementwiseUnaryLayer1->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
+    input1Layer->GetOutputSlot(0).Connect(elementwiseUnaryLayer1->GetInputSlot(0));
+
+    std::array<armnn::TensorShape, 2> concatInputShapes = { intermediateInfo.GetShape(), intermediateInfo.GetShape() };
+    armnn::IConnectableLayer* const concatLayer = net->AddConcatLayer(armnn::CreateDescriptorForConcatenation(
+        concatInputShapes.begin(), concatInputShapes.end(), 2), "concatenation");
+    concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    elementwiseUnaryLayer0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+    elementwiseUnaryLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
+
+    armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output");
+    concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
+
+    // Load graph into runtime
+    armnn::NetworkId networkIdentifier;
+    runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
+
+    // Now check how many sub-tensors the concat is using.
+    auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
+    {
+        if (subTensorHandle && subTensorHandle->GetParent())
+        {
+            return true;
+        }
+        return false;
+    };
+
+    for (auto&& layer : theGraph)
+    {
+        if(layer->GetType() == armnn::LayerType::Concat)
+        {
+            unsigned int numberOfSubTensors = 0;
+            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+            {
+                const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
+                if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
+                {
+                    ++numberOfSubTensors;
+                }
+            }
+            // sub-tensors should be supported in this configuration
+            ARMNN_ASSERT(numberOfSubTensors > 0);
+        }
+    }
+}
+
 TEST_CASE("ConcatonXorYPaddingRequiredTest")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -172,6 +247,212 @@ TEST_CASE("ConcatonXorYPaddingRequiredTest")
     ARMNN_ASSERT(numberOfSubTensors == 0);
 }
 
+TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
+{
+    using namespace armnn;
+
+    unsigned int splitAxis = 2;
+    unsigned int numSplit = 2;
+
+    const TensorShape& inputShape = { 2, 3, 4, 2 };
+    const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32);
+    const std::vector<TensorShape> outputShapes{{ 2, 3, 2, 2 },
+                                                { 2, 3, 2, 2 }};
+    const float qScale = 1.0f;
+    const int32_t qOffset = 0;
+
+    // Creates structures for input & output.
+    std::vector<float> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        13, 14,
+        15, 16,
+        17, 18,
+        19, 20,
+        21, 22,
+        23, 24,
+        25, 26,
+        27, 28,
+        29, 30,
+        31, 32,
+        33, 34,
+        35, 36,
+        37, 38,
+        39, 40,
+        41, 42,
+        43, 44,
+        45, 46,
+        47, 48
+    };
+
+    std::vector<float> expectedOutput0{
+        1, 2,
+        3, 4,
+        9, 10,
+        11, 12,
+        17, 18,
+        19, 20,
+        25, 26,
+        27, 28,
+        33, 34,
+        35, 36,
+        41, 42,
+        43, 44
+    };
+
+    std::vector<float> expectedOutput1{
+        5, 6,
+        7, 8,
+        13, 14,
+        15, 16,
+        21, 22,
+        23, 24,
+        29, 30,
+        31, 32,
+        37, 38,
+        39, 40,
+        45, 46,
+        47, 48
+    };
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32, qScale, qOffset);
+
+    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
+
+    // Splitter
+    std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
+
+    // Add current input shape to splitterDimSizes
+    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); ++i)
+    {
+        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
+    }
+
+    if (splitterDimSizes[splitAxis] % numSplit != 0)
+    {
+        throw ParseException("Number of splits must evenly divide the dimension");
+    }
+
+    splitterDimSizes[splitAxis] /= numSplit;
+
+    SplitterDescriptor splitDesc(numSplit, inputShape.GetNumDimensions());
+
+    for (unsigned int g = 0; g < numSplit; ++g)
+    {
+        // Set the size of the views.
+        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+        {
+            splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
+        }
+        splitDesc.SetViewOriginCoord(g, splitAxis, splitterDimSizes[splitAxis] * g);
+    }
+    IConnectableLayer* input = net->AddInputLayer(0, "input");
+    IConnectableLayer* elementWiseUnary0 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseunary_0");
+    IConnectableLayer* elementWiseUnary1 = net->AddElementwiseUnaryLayer(descriptor, "elementwiseunary_1");
+    IConnectableLayer* splitter = net->AddSplitterLayer(splitDesc, "splitter");
+
+    // Connections
+    Connect(input, splitter, inputTensorInfo, 0, 0);
+    Connect(splitter, elementWiseUnary0, intermediateInfo, 0, 0);
+    Connect(splitter, elementWiseUnary1, intermediateInfo, 1, 0);
+
+    std::vector<IConnectableLayer*> pooling2dLayers{elementWiseUnary0, elementWiseUnary1};
+
+    for (unsigned int i = 0; i < outputShapes.size(); ++i)
+    {
+        TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
+        IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
+        Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
+    }
+
+    std::map<int, std::vector<float>> inputTensorData = {{ 0, inputData }};
+    std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
+
+    // Load graph into runtime
+    armnn::NetworkId networkIdentifier;
+    runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
+
+    // Now check how many sub-tensors the splitter outputs are using.
+    auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
+    {
+        if (subTensorHandle && subTensorHandle->GetParent())
+        {
+            return true;
+        }
+        return false;
+    };
+
+    for (auto&& layer : theGraph)
+    {
+        if(layer->GetType() == armnn::LayerType::ElementwiseUnary)
+        {
+            unsigned int numberOfSubTensors = 0;
+            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+            {
+                const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
+                if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
+                {
+                    ++numberOfSubTensors;
+                }
+            }
+            // sub-tensors should be supported in this configuration
+            ARMNN_ASSERT(numberOfSubTensors > 0);
+        }
+    }
+
+    InputTensors inputTensors;
+    inputTensors.reserve(inputTensorData.size());
+    for (auto&& it : inputTensorData)
+    {
+        TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
+        inputTensorInfo.SetConstant(true);
+        inputTensors.push_back({it.first,
+                                ConstTensor(inputTensorInfo, it.second.data())});
+    }
+    OutputTensors outputTensors;
+    outputTensors.reserve(expectedOutputData.size());
+    std::map<int, std::vector<float>> outputStorage;
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<float> out(it.second.size());
+        outputStorage.emplace(it.first, out);
+        outputTensors.push_back({it.first,
+                                 Tensor(runtime->GetOutputTensorInfo(networkIdentifier, it.first),
+                                        outputStorage.at(it.first).data())});
+    }
+
+    // Does the inference.
+    runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
+    // Checks the results.
+    float tolerance = 0.000001f;
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<float> out = outputStorage.at(it.first);
+        for (unsigned int i = 0; i < out.size(); ++i)
+        {
+            CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+                          "Actual output: " << out[i] << ". Expected output: " << it.second[i]);
+
+        }
+    }
+}
+
 TEST_CASE("SplitteronXorYPaddingRequiredTest")
 {
     using namespace armnn;
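For context on the behaviour this revert restores: with `HasAxis()`/`GetAxis()` removed from `ViewsDescriptor`, `ComputeSplitAxis` (ArmComputeUtils.hpp hunk above) goes back to inferring the split axes by comparing each view's sizes against the input shape. Below is a minimal standalone sketch of that fallback using plain STL containers instead of ArmNN's `SplitterDescriptor`; `InferSplitAxes` and its parameter names are illustrative, not ArmNN API.

```cpp
#include <cstdio>
#include <set>
#include <vector>

// Fallback axis inference, as restored in ComputeSplitAxis: any dimension
// where a view's size differs from the input's size must be a split axis.
std::set<unsigned int> InferSplitAxes(const std::vector<std::vector<unsigned int>>& viewSizes,
                                      const std::vector<unsigned int>& inputShape)
{
    std::set<unsigned int> splitAxis;
    for (const auto& view : viewSizes)                      // one entry per output view
    {
        for (unsigned int dimIdx = 0; dimIdx < inputShape.size(); ++dimIdx)
        {
            if (view[dimIdx] != inputShape[dimIdx])         // view is narrower here
            {
                splitAxis.insert(dimIdx);
            }
        }
    }
    return splitAxis;
}

int main()
{
    // Mirrors SplitteronXorYNoPaddingRequiredTest above: an input of
    // { 2, 3, 4, 2 } split into two { 2, 3, 2, 2 } views.
    std::vector<std::vector<unsigned int>> views = {{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
    std::vector<unsigned int> input = { 2, 3, 4, 2 };

    for (unsigned int axis : InferSplitAxes(views, input))
    {
        std::printf("inferred split axis: %u\n", axis);     // prints "inferred split axis: 2"
    }
    return 0;
}
```

In the restored splitter test the two `{ 2, 3, 2, 2 }` views of a `{ 2, 3, 4, 2 }` input yield the single axis 2, the kind of split that the re-enabled `SupportsSubTensors()` paths in the Neon and CL factories can serve with sub-tensor views of the parent tensor, which is what the tests assert via `TraceSubTensorHandleAncestry`.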