From 8800c00770ed14eb48045cfcf033d6b67595a126 Mon Sep 17 00:00:00 2001
From: Matthew Bentham
Date: Mon, 19 Nov 2018 13:19:28 +0000
Subject: IVGCVSW-2169 Remove DataLayoutIndexed from public API

Change-Id: If8d8087d9d365e467d3ca9bf9c40d7219cb75cfd
---
 include/armnn/Descriptors.hpp | 14 ++--
 include/armnn/Types.hpp | 44 -----------
 src/armnn/InternalTypes.cpp | 12 ---
 src/armnn/layers/BatchToSpaceNdLayer.cpp | 3 +-
 src/armnn/layers/Convolution2dLayer.cpp | 1 +
 src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 1 +
 src/armnn/layers/Pooling2dLayer.cpp | 8 +-
 src/armnn/layers/ResizeBilinearLayer.cpp | 5 +-
 src/armnn/layers/SpaceToBatchNdLayer.cpp | 6 +-
 src/armnn/test/CreateWorkload.hpp | 19 ++---
 src/armnn/test/TensorHelpers.hpp | 6 +-
 src/backends/backendsCommon/CMakeLists.txt | 2 +
 src/backends/backendsCommon/DataLayoutIndexed.cpp | 22 ++++++
 src/backends/backendsCommon/DataLayoutIndexed.hpp | 51 +++++++++++++
 src/backends/backendsCommon/WorkloadData.cpp | 14 ++--
 src/backends/backendsCommon/common.mk | 1 +
 .../backendsCommon/test/Conv2dTestImpl.hpp | 41 ++++++-----
 src/backends/backendsCommon/test/LayerTests.cpp | 86 +++++++++++-----------
 src/backends/backendsCommon/test/LayerTests.hpp | 54 +++++++-------
 .../backendsCommon/test/Pooling2dTestImpl.hpp | 21 +++---
 .../ClBatchNormalizationFloatWorkload.cpp | 16 ++--
 .../workloads/ClL2NormalizationFloatWorkload.cpp | 12 ++-
 src/backends/cl/workloads/ClPooling2dWorkload.cpp | 8 +-
 .../cl/workloads/ClResizeBilinearFloatWorkload.cpp | 2 +-
 .../NeonBatchNormalizationFloatWorkload.cpp | 16 ++--
 .../workloads/NeonL2NormalizationFloatWorkload.cpp | 11 ++-
 .../neon/workloads/NeonPooling2dWorkload.cpp | 6 +-
 .../reference/workloads/BatchToSpaceNd.hpp | 1 +
 src/backends/reference/workloads/ConvImpl.hpp | 2 +
 .../reference/workloads/ResizeBilinear.hpp | 2 +
 .../reference/workloads/SpaceToBatchNd.cpp | 2 +
 .../reference/workloads/TensorBufferArrayView.hpp | 2 +
 32 files changed, 263 insertions(+), 228 deletions(-)
 create mode 100644 src/backends/backendsCommon/DataLayoutIndexed.cpp
 create mode 100644 src/backends/backendsCommon/DataLayoutIndexed.hpp

diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 0abc7583b9..b705abe729 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -198,7 +198,7 @@ struct Pooling2dDescriptor
     uint32_t m_StrideY;
     OutputShapeRounding m_OutputShapeRounding;
     PaddingMethod m_PaddingMethod;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 struct FullyConnectedDescriptor
@@ -286,7 +286,7 @@ struct L2NormalizationDescriptor
         : m_DataLayout(DataLayout::NCHW)
     {}

-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 struct BatchNormalizationDescriptor
@@ -297,7 +297,7 @@ struct BatchNormalizationDescriptor
     {}

     float m_Eps;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 struct BatchToSpaceNdDescriptor
@@ -316,7 +316,7 @@ struct BatchToSpaceNdDescriptor

     std::vector<unsigned int> m_BlockShape;
     std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 struct FakeQuantizationDescriptor
@@ -340,7 +340,7 @@ struct ResizeBilinearDescriptor

     uint32_t m_TargetWidth;
     uint32_t m_TargetHeight;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 struct ReshapeDescriptor
@@ -371,7 +371,7 @@ struct SpaceToBatchNdDescriptor

     std::vector<unsigned int> m_BlockShape;
     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 // temporary descriptor for Lstm
@@ -455,7 +455,7 @@ struct StridedSliceDescriptor

     int32_t m_EllipsisMask;
     int32_t m_NewAxisMask;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };

 }
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index cd6e17be37..d815005f5b 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -31,56 +31,12 @@ enum class DataType
     Signed32 = 3
 };

-// Begin: DataLayout
-
 enum class DataLayout
 {
     NCHW = 1,
     NHWC = 2
 };

-// Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout
-class DataLayoutIndexed
-{
-public:
-    DataLayoutIndexed(DataLayout dataLayout) : m_DataLayout(dataLayout)
-    {
-        switch (dataLayout)
-        {
-            case DataLayout::NHWC:
-                m_ChannelsIndex = 3;
-                m_HeightIndex = 1;
-                m_WidthIndex = 2;
-                break;
-            case DataLayout::NCHW:
-                m_ChannelsIndex = 1;
-                m_HeightIndex = 2;
-                m_WidthIndex = 3;
-                break;
-            default:
-                throw InvalidArgumentException("Unknown DataLayout value: " +
-                                               std::to_string(static_cast<int>(dataLayout)));
-        }
-    }
-
-    DataLayout GetDataLayout() const { return m_DataLayout; }
-    unsigned int GetChannelsIndex() const { return m_ChannelsIndex; }
-    unsigned int GetHeightIndex() const { return m_HeightIndex; }
-    unsigned int GetWidthIndex() const { return m_WidthIndex; }
-
-private:
-    DataLayout m_DataLayout;
-    unsigned int m_ChannelsIndex;
-    unsigned int m_HeightIndex;
-    unsigned int m_WidthIndex;
-};
-
-// Conversion methods - implementations in src/armnn/InternalTypes.cpp
-bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed);
-bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout);
-
-// End: DataLayout
-
 enum class ActivationFunction
 {
     Sigmoid = 0,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index dade1f70c3..f37b1a0c66 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -51,16 +51,4 @@ char const* GetLayerTypeAsCString(LayerType type)
     }
 }

-// Definition in include/armnn/Types.hpp
-bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed)
-{
-    return dataLayout == indexed.GetDataLayout();
-}
-
-// Definition in include/armnn/Types.hpp
-bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout)
-{
-    return indexed.GetDataLayout() == dataLayout;
-}
-
 }
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index aff818e664..e1b78b21df 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -10,6 +10,7 @@
 #include
 #include
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include
 #include

@@ -50,7 +51,7 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()

 std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    const DataLayoutIndexed & dataLayout = m_Param.m_DataLayout;
+    const DataLayoutIndexed dataLayout = m_Param.m_DataLayout;
     const TensorShape& inputShape = inputShapes[0];
     unsigned int inBatchSize = inputShape[0];
     unsigned int channelSize = inputShape[dataLayout.GetChannelsIndex()];
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index f3597e2914..4d3553f0cc 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -8,6 +8,7 @@
 #include
 #include
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include

 namespace armnn
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index f356e39335..6ad32a756a 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -8,6
+8,7 @@ #include #include +#include #include namespace armnn diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp index 821c011cd0..24b7a69c49 100644 --- a/src/armnn/layers/Pooling2dLayer.cpp +++ b/src/armnn/layers/Pooling2dLayer.cpp @@ -7,6 +7,7 @@ #include "LayerCloneBase.hpp" #include +#include #include #include @@ -33,13 +34,14 @@ std::vector Pooling2dLayer::InferOutputShapes(const std::vector +#include #include #include @@ -34,10 +35,10 @@ std::vector ResizeBilinearLayer::InferOutputShapes(const std::vecto { BOOST_ASSERT(inputShapes.size() == 1); const TensorShape& inputShape = inputShapes[0]; - + const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout; unsigned int outWidth = m_Param.m_TargetWidth; unsigned int outHeight = m_Param.m_TargetHeight; - unsigned int outChannels = inputShape[m_Param.m_DataLayout.GetChannelsIndex()]; + unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()]; unsigned int outBatch = inputShape[0]; TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ? diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp index cc93886e50..658945ef08 100644 --- a/src/armnn/layers/SpaceToBatchNdLayer.cpp +++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp @@ -9,6 +9,7 @@ #include +#include #include #include @@ -48,8 +49,9 @@ std::vector SpaceToBatchNdLayer::InferOutputShapes(const std::vecto 1U, std::multiplies<>()); - unsigned int heightIndex = m_Param.m_DataLayout.GetHeightIndex(); - unsigned int widthIndex = m_Param.m_DataLayout.GetWidthIndex(); + DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout; + unsigned int heightIndex = dimensionIndices.GetHeightIndex(); + unsigned int widthIndex = dimensionIndices.GetWidthIndex(); std::pair heightPad = m_Param.m_PadList[0]; std::pair widthPad = m_Param.m_PadList[1]; diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index 1a9bd56ac5..b8ba72f271 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -8,6 +8,7 @@ #include +#include #include #include #include @@ -133,11 +134,11 @@ std::unique_ptr CreateArithmeticWorkloadTest(armnn::IWorkloadFacto template std::unique_ptr CreateBatchNormalizationWorkloadTest( - armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayoutIndexed dataLayout = DataLayout::NCHW) + armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) { TensorShape tensorShape; - switch (dataLayout.GetDataLayout()) + switch (dataLayout) { case DataLayout::NHWC: tensorShape = { 2, 4, 4, 3 }; @@ -184,7 +185,7 @@ std::unique_ptr CreateBatchNormalizationWorkl BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType))); BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType))); BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType))); - BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout)); + BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); // Returns so we can do extra, backend-specific tests. 
return workload; @@ -842,13 +843,12 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& template std::unique_ptr CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph, - DataLayoutIndexed dataLayout = - DataLayout::NCHW) + DataLayout dataLayout = DataLayout::NCHW) { TensorShape inputShape; TensorShape outputShape; - switch (dataLayout.GetDataLayout()) { + switch (dataLayout) { case DataLayout::NHWC: inputShape = { 2, 4, 4, 3 }; outputShape = { 2, 2, 2, 3 }; @@ -861,8 +861,9 @@ std::unique_ptr CreateResizeBilinearWorkloadTest(armnn:: // Creates the layer we're testing. ResizeBilinearDescriptor resizeDesc; - resizeDesc.m_TargetWidth = outputShape[dataLayout.GetWidthIndex()]; - resizeDesc.m_TargetHeight = outputShape[dataLayout.GetHeightIndex()]; + DataLayoutIndexed dimensionIndices = dataLayout; + resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()]; + resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()]; resizeDesc.m_DataLayout = dataLayout; Layer* const layer = graph.AddLayer(resizeDesc, "layer"); @@ -883,7 +884,7 @@ std::unique_ptr CreateResizeBilinearWorkloadTest(armnn:: ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData(); BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); - BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout)); + BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); // Returns so we can do extra, backend-specific tests. return workload; diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp index 4501c3532c..238232ae96 100644 --- a/src/armnn/test/TensorHelpers.hpp +++ b/src/armnn/test/TensorHelpers.hpp @@ -216,9 +216,9 @@ armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { - switch (dataLayout.GetDataLayout()) + switch (dataLayout) { case armnn::DataLayout::NCHW: return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType()); @@ -226,6 +226,6 @@ armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType()); default: throw armnn::InvalidArgumentException("unknown data layout [" - + std::to_string(static_cast(dataLayout.GetDataLayout())) + "]"); + + std::to_string(static_cast(dataLayout)) + "]"); } } diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt index 1fe9888901..f9bded76f4 100644 --- a/src/backends/backendsCommon/CMakeLists.txt +++ b/src/backends/backendsCommon/CMakeLists.txt @@ -9,6 +9,8 @@ list(APPEND armnnBackendsCommon_sources CpuTensorHandle.cpp CpuTensorHandleFwd.hpp CpuTensorHandle.hpp + DataLayoutIndexed.hpp + DataLayoutIndexed.cpp IBackendInternal.hpp IBackendContext.hpp ILayerSupport.cpp diff --git a/src/backends/backendsCommon/DataLayoutIndexed.cpp b/src/backends/backendsCommon/DataLayoutIndexed.cpp new file mode 100644 index 0000000000..b99d52c5b9 --- /dev/null +++ b/src/backends/backendsCommon/DataLayoutIndexed.cpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "DataLayoutIndexed.hpp" + +namespace armnn { + +// Definition in include/armnn/Types.hpp +bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed) +{ + return dataLayout == indexed.GetDataLayout(); +} + +// Definition in include/armnn/Types.hpp +bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout) +{ + return indexed.GetDataLayout() == dataLayout; +} + +} diff --git a/src/backends/backendsCommon/DataLayoutIndexed.hpp b/src/backends/backendsCommon/DataLayoutIndexed.hpp new file mode 100644 index 0000000000..8547475706 --- /dev/null +++ b/src/backends/backendsCommon/DataLayoutIndexed.hpp @@ -0,0 +1,51 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once +#include + +namespace armnn +{ + +// Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout +class DataLayoutIndexed +{ +public: + DataLayoutIndexed(DataLayout dataLayout) : m_DataLayout(dataLayout) + { + switch (dataLayout) + { + case DataLayout::NHWC: + m_ChannelsIndex = 3; + m_HeightIndex = 1; + m_WidthIndex = 2; + break; + case DataLayout::NCHW: + m_ChannelsIndex = 1; + m_HeightIndex = 2; + m_WidthIndex = 3; + break; + default: + throw InvalidArgumentException("Unknown DataLayout value: " + + std::to_string(static_cast(dataLayout))); + } + } + + DataLayout GetDataLayout() const { return m_DataLayout; } + unsigned int GetChannelsIndex() const { return m_ChannelsIndex; } + unsigned int GetHeightIndex() const { return m_HeightIndex; } + unsigned int GetWidthIndex() const { return m_WidthIndex; } + +private: + DataLayout m_DataLayout; + unsigned int m_ChannelsIndex; + unsigned int m_HeightIndex; + unsigned int m_WidthIndex; +}; + +// Equality methods +bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed); +bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout); + +} diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 18ab4a8709..d5e3638a06 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -6,6 +6,8 @@ #include "CpuTensorHandle.hpp" +#include + #include #include #include @@ -675,10 +677,11 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c } { + DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout); const unsigned int inputChannelCount = - workloadInfo.m_InputTensorInfos[0].GetShape()[this->m_Parameters.m_DataLayout.GetChannelsIndex()]; + workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()]; const unsigned int outputChannelCount = - workloadInfo.m_OutputTensorInfos[0].GetShape()[this->m_Parameters.m_DataLayout.GetChannelsIndex()]; + workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()]; if (inputChannelCount != outputChannelCount) { throw InvalidArgumentException( @@ -774,14 +777,15 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c std::pair heightPad = m_Parameters.m_PadList[0]; std::pair widthPad = m_Parameters.m_PadList[1]; - unsigned int inputHeight = inputShape[m_Parameters.m_DataLayout.GetHeightIndex()] + DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout); + unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] + heightPad.first + heightPad.second; - unsigned int inputWidth = 
inputShape[m_Parameters.m_DataLayout.GetWidthIndex()] + unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] + widthPad.first + widthPad.second; unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth - * inputShape[m_Parameters.m_DataLayout.GetChannelsIndex()]; + * inputShape[dimensionIndices.GetChannelsIndex()]; if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements) { diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 8d29316599..c99dd392f4 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -9,6 +9,7 @@ COMMON_SOURCES := \ BackendRegistry.cpp \ + DataLayoutIndexed.cpp \ CpuTensorHandle.cpp \ ILayerSupport.cpp \ MemCopyWorkload.cpp \ diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp index d99b7f7fa3..6685a8edd2 100755 --- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp +++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp @@ -14,6 +14,7 @@ #include #include "QuantizeHelper.hpp" +#include #include #include #include @@ -75,7 +76,7 @@ LayerTestResult SimpleConvolution2dTestImpl( const boost::multi_array& originalOutputExpected, float qScale, int32_t qOffset, - const armnn::DataLayoutIndexed& layout = armnn::DataLayout::NCHW, + const armnn::DataLayout layout = armnn::DataLayout::NCHW, uint32_t padLeft = 0, uint32_t padTop = 0, uint32_t padRight = 0, @@ -137,7 +138,7 @@ LayerTestResult SimpleConvolution2dTestImpl( // at this point if we require it permute the input data const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -166,7 +167,7 @@ LayerTestResult SimpleConvolution2dTestImpl( outputData.insert(outputData.end(), outputImage.begin(), outputImage.end()); // at this point if we require it permute the expected output - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(outputData.size()); armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data()); @@ -187,7 +188,7 @@ LayerTestResult SimpleConvolution2dTestImpl( armnn::ScopedCpuTensorHandle biasTensor(biasDesc); // Permute the kernel if necessary boost::multi_array kernel = boost::multi_array(originalKernel); - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data()); } @@ -210,7 +211,7 @@ LayerTestResult SimpleConvolution2dTestImpl( data.m_Parameters.m_PadTop = padTop; data.m_Parameters.m_PadBottom = padBottom; data.m_Parameters.m_BiasEnabled = biasEnabled; - data.m_Parameters.m_DataLayout = layout.GetDataLayout(); + data.m_Parameters.m_DataLayout = layout; std::unique_ptr workload = workloadFactory.CreateConvolution2d(data, info); inputHandle->Allocate(); @@ -327,7 +328,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( const boost::multi_array& outputExpected, float qScale, int32_t qOffset, - const armnn::DataLayoutIndexed& layout, + const armnn::DataLayout layout, uint32_t padLeft = 0, uint32_t padTop = 0, uint32_t padRight = 0, @@ -377,7 +378,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( // At this point if we require it 
permute the input data const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -401,7 +402,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( LayerTestResult ret(outputTensorInfo); // At this point if we require it permute the expected output - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(outputData.size()); armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data()); @@ -417,7 +418,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( // Permute the kernel if necessary boost::multi_array kernel = boost::multi_array(originalKernel); - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data()); } @@ -440,7 +441,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( data.m_Parameters.m_PadTop = padTop; data.m_Parameters.m_PadBottom = padBottom; data.m_Parameters.m_BiasEnabled = biasEnabled; - data.m_Parameters.m_DataLayout = layout.GetDataLayout(); + data.m_Parameters.m_DataLayout = layout; armnn::WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); @@ -466,7 +467,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( float qScale, int32_t qOffset, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { unsigned int inputHeight = 3; unsigned int inputWidth = 3; @@ -511,7 +512,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( })); // at this point if we require it permute the input data const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -533,7 +534,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( 0.f, 0.f, 0.f, -1.f, 0.f, -1.f, })); - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(kernelData.size()); armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, kernelData.data(), tmp.data()); @@ -557,7 +558,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( } LayerTestResult ret(outputTensorInfo); - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(outputImage.size()); armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data()); @@ -589,7 +590,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( data.m_Parameters.m_PadTop = 0; data.m_Parameters.m_PadBottom = 0; data.m_Parameters.m_BiasEnabled = biasEnabled; - data.m_Parameters.m_DataLayout = layout.GetDataLayout(); + data.m_Parameters.m_DataLayout = layout; std::unique_ptr workload = workloadFactory.CreateDepthwiseConvolution2d(data, info); inputHandle->Allocate(); @@ -611,7 +612,7 @@ LayerTestResult DepthwiseConvolution2dTestImpl( float qScale, int32_t qOffset, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { unsigned int depthMultiplier = 2; @@ -672,7 +673,7 @@ LayerTestResult 
DepthwiseConvolution2dTestImpl( std::vector inputData = originalInputData; // at this point if we require it permute the input data const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, originalInputData.data(), inputData.data()); } @@ -709,7 +710,7 @@ LayerTestResult DepthwiseConvolution2dTestImpl( 0, 0, 0 })); std::vector kernelData = originalKernelData; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernelData.data(), kernelData.data()); } @@ -762,7 +763,7 @@ LayerTestResult DepthwiseConvolution2dTestImpl( LayerTestResult ret(outputTensorInfo); std::vector outputImage = originalOutputImage; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, originalOutputImage.data(), outputImage.data()); } @@ -792,7 +793,7 @@ LayerTestResult DepthwiseConvolution2dTestImpl( data.m_Parameters.m_PadTop = 1; data.m_Parameters.m_PadBottom = 1; data.m_Parameters.m_BiasEnabled = biasEnabled; - data.m_Parameters.m_DataLayout = layout.GetDataLayout(); + data.m_Parameters.m_DataLayout = layout; std::unique_ptr workload = workloadFactory.CreateDepthwiseConvolution2d(data, info); inputHandle->Allocate(); diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index caa4f4065d..ecd09ca024 100755 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -109,7 +109,7 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( float qScale, int32_t qOffset, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Use common single-batch 3-channel 16x8 image. armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType()); @@ -192,7 +192,7 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( float qScale, int32_t qOffset, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path. 
@@ -315,7 +315,7 @@ LayerTestResult SimpleConvolution2d3x5Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return SimpleConvolution2d3x5TestCommon(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout); } @@ -324,7 +324,7 @@ LayerTestResult SimpleConvolution2d3x5Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return SimpleConvolution2d3x5TestCommon(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); } @@ -333,7 +333,7 @@ LayerTestResult SimpleConvolution2d3x3Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return SimpleConvolution2d3x3TestCommon(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout); } @@ -355,7 +355,7 @@ LayerTestResult SimpleConvolution2d3x3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return SimpleConvolution2d3x3TestCommon(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); } @@ -364,7 +364,7 @@ template LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout, + const armnn::DataLayout layout, float qScale, int32_t qOffset) { @@ -426,7 +426,7 @@ template LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout, + const armnn::DataLayout layout, float qScale, int32_t qOffset) { @@ -485,7 +485,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( float qScale, int32_t qOffset, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Use a single-batch 2-channel 5x5 image as input. 
armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType()); @@ -673,7 +673,7 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon( workloadFactory, memoryManager, layout, 0.0f, 0); @@ -682,7 +682,7 @@ Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest( LayerTestResult Convolution2dAsymmetricPaddingTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return SimpleConvolution2dAsymmetricPaddingTestCommon( workloadFactory, memoryManager, layout, 0.0f, 0); @@ -692,7 +692,7 @@ LayerTestResult DepthwiseConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout); @@ -710,7 +710,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return DepthwiseConvolution2dDepthMul1TestImpl( workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout); @@ -720,7 +720,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return DepthwiseConvolution2dAsymmetricTestCommon( workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout); @@ -730,7 +730,7 @@ LayerTestResult DepthwiseConvolution2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); @@ -740,7 +740,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return DepthwiseConvolution2dDepthMul1TestImpl( workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); @@ -775,7 +775,7 @@ LayerTestResult CompareDepthwiseConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { return CompareDepthwiseConvolution2dTestImpl(workloadFactory, memoryManager, refWorkloadFactory, layout); } @@ -784,13 +784,13 @@ template LayerTestResult CompareDepthwiseConvolution2dTest( armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, armnn::IWorkloadFactory&, - const armnn::DataLayoutIndexed&); + const armnn::DataLayout); template LayerTestResult 
CompareDepthwiseConvolution2dTest( armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, armnn::IWorkloadFactory&, - const armnn::DataLayoutIndexed&); + const armnn::DataLayout); LayerTestResult SimpleNormalizationAcrossTest( armnn::IWorkloadFactory& workloadFactory, @@ -3857,7 +3857,7 @@ LayerTestResult Concatenation4dDiffShapeDim3Test( LayerTestResult ResizeBilinearNopTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout); const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout); @@ -3875,7 +3875,7 @@ LayerTestResult ResizeBilinearNopTest( }); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + if (dataLayout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -3911,7 +3911,7 @@ LayerTestResult ResizeBilinearNopTest( LayerTestResult SimpleResizeBilinearTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout); const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 1, 1, dataLayout); @@ -3937,7 +3937,7 @@ LayerTestResult SimpleResizeBilinearTest( }); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + if (dataLayout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -3977,7 +3977,7 @@ LayerTestResult SimpleResizeBilinearTest( LayerTestResult ResizeBilinearSqMinTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout); const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout); @@ -4003,7 +4003,7 @@ LayerTestResult ResizeBilinearSqMinTest( }); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + if (dataLayout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -4043,7 +4043,7 @@ LayerTestResult ResizeBilinearSqMinTest( LayerTestResult ResizeBilinearMinTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 3, 5, dataLayout); const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 3, dataLayout); @@ -4067,7 +4067,7 @@ LayerTestResult ResizeBilinearMinTest( }); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + if (dataLayout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); 
armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -4107,7 +4107,7 @@ LayerTestResult ResizeBilinearMinTest( LayerTestResult ResizeBilinearMagTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 3, 2, dataLayout); const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 3, 5, dataLayout); @@ -4133,7 +4133,7 @@ LayerTestResult ResizeBilinearMagTest( }); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; - if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + if (dataLayout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -4235,7 +4235,7 @@ LayerTestResult L2NormalizationTestImpl( const armnn::TensorShape& inputOutputTensorShape, const std::vector& inputValues, const std::vector& expectedOutputValues, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32); const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32); @@ -4243,7 +4243,7 @@ LayerTestResult L2NormalizationTestImpl( // at this point if we require it permute the input data const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; std::vector inputData = inputValues; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(inputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); @@ -4254,7 +4254,7 @@ LayerTestResult L2NormalizationTestImpl( LayerTestResult result(outputTensorInfo); std::vector expectedOutputData = expectedOutputValues; - if (layout.GetDataLayout() == armnn::DataLayout::NHWC) + if (layout == armnn::DataLayout::NHWC) { std::vector tmp(expectedOutputData.size()); armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data()); @@ -4266,7 +4266,7 @@ LayerTestResult L2NormalizationTestImpl( std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); armnn::L2NormalizationQueueDescriptor descriptor; - descriptor.m_Parameters.m_DataLayout = layout.GetDataLayout(); + descriptor.m_Parameters.m_DataLayout = layout; armnn::WorkloadInfo info; AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); @@ -4729,7 +4729,7 @@ LayerTestResult PadFloat324dTest( LayerTestResult L2Normalization1dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Width: 1 // Height: 1 @@ -4799,7 +4799,7 @@ LayerTestResult L2Normalization1dTest( LayerTestResult L2Normalization2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Width: 5 // Height: 1 @@ -4844,7 +4844,7 @@ LayerTestResult L2Normalization2dTest( LayerTestResult L2Normalization3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Width: 3 // 
Height: 4 @@ -4909,7 +4909,7 @@ LayerTestResult L2Normalization3dTest( LayerTestResult L2Normalization4dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout) + const armnn::DataLayout layout) { // Width: 3 // Height: 4 @@ -6357,7 +6357,7 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4Uint8Test( LayerTestResult SimpleMaxPooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { return SimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } @@ -6365,7 +6365,7 @@ LayerTestResult SimpleMaxPooling2dTest( LayerTestResult SimpleMaxPooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { return SimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } @@ -6373,7 +6373,7 @@ LayerTestResult SimpleMaxPooling2dUint8Test( LayerTestResult SimpleAveragePooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { return SimpleAveragePooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } @@ -6381,7 +6381,7 @@ LayerTestResult SimpleAveragePooling2dTest( LayerTestResult SimpleAveragePooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { return SimpleAveragePooling2dTestCommon( workloadFactory, memoryManager, dataLayout, 0.5, -1); @@ -6413,7 +6413,7 @@ LayerTestResult LargeTensorsAveragePooling2dUint8Test( LayerTestResult SimpleL2Pooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { return SimpleL2Pooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } @@ -6421,7 +6421,7 @@ LayerTestResult SimpleL2Pooling2dTest( LayerTestResult SimpleL2Pooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout) + const armnn::DataLayout dataLayout) { return SimpleL2Pooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 15d0853006..498cfb7fe0 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -58,13 +58,13 @@ LayerTestResult SimpleConvolution2d3x5Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult SimpleConvolution2d3x3Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult SimpleConvolution2d3x3NhwcTest( 
armnn::IWorkloadFactory& workloadFactory, @@ -75,12 +75,12 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult Convolution2dAsymmetricPaddingTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult Convolution1dTest( armnn::IWorkloadFactory& workloadFactory, @@ -96,7 +96,7 @@ LayerTestResult DepthwiseConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult DepthwiseConvolution2dDepthNhwcTest( armnn::IWorkloadFactory& workloadFactory, @@ -107,13 +107,13 @@ LayerTestResult DepthwiseConvolution2dDepthMul1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult DepthwiseConvolution2dAsymmetricTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2Test( armnn::IWorkloadFactory& workloadFactory, @@ -154,22 +154,22 @@ LayerTestResult IgnorePaddingMaxPooling2dSize3Uint8Test( LayerTestResult SimpleMaxPooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult SimpleMaxPooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult SimpleAveragePooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult SimpleAveragePooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2Test( armnn::IWorkloadFactory& workloadFactory, @@ -203,12 +203,12 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3Uint8Test( LayerTestResult SimpleL2Pooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult SimpleL2Pooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult L2Pooling2dSize3Stride1Test( armnn::IWorkloadFactory& workloadFactory, @@ -464,7 +464,7 @@ LayerTestResult CompareDepthwiseConvolution2dTest( 
armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult CompareNormalizationTest( armnn::IWorkloadFactory& workloadFactory, @@ -606,32 +606,32 @@ LayerTestResult CompareBoundedReLuTest( LayerTestResult ResizeBilinearNopTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); // Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image. LayerTestResult SimpleResizeBilinearTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); // Tests the resize bilinear for minification of a square input matrix (also: input dimensions are a // multiple of output dimensions). LayerTestResult ResizeBilinearSqMinTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); // Tests the resize bilinear for minification (output dimensions smaller than input dimensions). LayerTestResult ResizeBilinearMinTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions). LayerTestResult ResizeBilinearMagTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& dataLayout); + const armnn::DataLayout dataLayout); LayerTestResult BatchNormTest( armnn::IWorkloadFactory& workloadFactory, @@ -648,22 +648,22 @@ LayerTestResult FakeQuantizationTest( LayerTestResult L2Normalization1dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult L2Normalization2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult L2Normalization3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult L2Normalization4dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult ConstantTest( armnn::IWorkloadFactory& workloadFactory, @@ -765,25 +765,25 @@ LayerTestResult SimpleConvolution2d3x5Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled, - const armnn::DataLayoutIndexed& layout); + const armnn::DataLayout layout); LayerTestResult SimpleConvolution2d3x3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult DepthwiseConvolution2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult DepthwiseConvolution2dDepthMul1Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult ConstantLinearActivationUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
index 2e851faaa7..9050fc64a6 100644
--- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -34,10 +34,11 @@ LayerTestResult SimplePooling2dTestImpl(
     const boost::multi_array& input,
     const boost::multi_array& outputExpected)
 {
-    const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
-    auto heightIndex = dataLayout.GetHeightIndex();
-    auto widthIndex = dataLayout.GetWidthIndex();
-    auto channelsIndex = dataLayout.GetChannelsIndex();
+    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
+    const armnn::DataLayoutIndexed dimensionIndices = dataLayout;
+    auto heightIndex = dimensionIndices.GetHeightIndex();
+    auto widthIndex = dimensionIndices.GetWidthIndex();
+    auto channelsIndex = dimensionIndices.GetChannelsIndex();
 
     unsigned int inputHeight = boost::numeric_cast(input.shape()[heightIndex]);
     unsigned int inputWidth = boost::numeric_cast(input.shape()[widthIndex]);
@@ -240,7 +241,7 @@ template
 LayerTestResult SimpleMaxPooling2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
+    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
@@ -286,7 +287,7 @@ LayerTestResult SimpleMaxPooling2dTestCommon(
     }));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -309,7 +310,7 @@ template
 LayerTestResult SimpleAveragePooling2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
@@ -355,7 +356,7 @@ LayerTestResult SimpleAveragePooling2dTestCommon(
     }));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -429,7 +430,7 @@ template
 LayerTestResult SimpleL2Pooling2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
@@ -466,7 +467,7 @@ LayerTestResult SimpleL2Pooling2dTestCommon(
     }));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index c0f6cdfe6e..fa0be85100 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -23,20 +23,18 @@ arm_compute::Status ClBatchNormalizationValidate(const TensorInfo& input,
                                                  const TensorInfo& gamma,
                                                  const BatchNormalizationDescriptor &desc)
 {
-    const DataLayout dataLayout = desc.m_DataLayout.GetDataLayout();
-
     const arm_compute::TensorInfo aclInputInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(input, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(input, desc.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(output, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(output, desc.m_DataLayout);
     const arm_compute::TensorInfo aclMeanInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(mean, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(mean, desc.m_DataLayout);
     const arm_compute::TensorInfo aclVarInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(var, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(var, desc.m_DataLayout);
     const arm_compute::TensorInfo aclBetaInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(beta, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(beta, desc.m_DataLayout);
     const arm_compute::TensorInfo aclGammaInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(gamma, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(gamma, desc.m_DataLayout);
 
     return arm_compute::CLBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
@@ -68,7 +66,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
     arm_compute::ICLTensor& input = static_cast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index 74e40ecbf6..2226e09964 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -18,12 +18,10 @@ arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input,
-                                                                       descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output,
-                                                                        descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
-    unsigned int axis = (descriptor.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
+    unsigned int axis = (descriptor.m_DataLayout == DataLayout::NCHW) ? 2 : 0;
 
     return arm_compute::CLL2NormalizeLayer::validate(&aclInput, &aclOutput, axis);
 }
@@ -37,11 +35,11 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
     arm_compute::ICLTensor& input = static_cast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-    unsigned int axis = (m_Data.m_Parameters.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
+    unsigned int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0;
 
     m_Layer.configure(&input, &output, axis);
 }
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index f4b0356bd1..607bc58f38 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -19,10 +19,8 @@ arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const Pooling2dDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInputInfo =
-        BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutputInfo =
-        BuildArmComputeTensorInfo(output, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);
 
@@ -38,7 +36,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
     arm_compute::ICLTensor& input = static_cast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 3e2f895b36..ac7d60c23b 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -26,7 +26,7 @@ ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinea
     arm_compute::ICLTensor& input = static_cast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
index e576c64752..a8181f66d9 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
@@ -21,20 +21,18 @@ arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
                                                    const TensorInfo& gamma,
                                                    const BatchNormalizationDescriptor& descriptor)
 {
-    const DataLayout dataLayout = descriptor.m_DataLayout.GetDataLayout();
-
     const arm_compute::TensorInfo aclInputInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(input, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(output, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclMeanInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(mean, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclVarInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(var, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclBetaInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(beta, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclGammaInfo =
-        armcomputetensorutils::BuildArmComputeTensorInfo(gamma, dataLayout);
+        armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout);
 
     return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
@@ -54,7 +52,7 @@ NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index ca3b36e16f..df8caefbd2 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,12 +14,11 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
-        output, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::NormalizationLayerInfo normalizationInfo =
-        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());
+        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
 
     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -34,14 +33,14 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.configure(&input,
                       &output,
                       CreateAclNormalizationLayerInfoForL2Normalization(
-                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout));
 }
 
 void NeonL2NormalizationFloatWorkload::Execute() const
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index b8acf36028..9c8f71ad19 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -18,9 +18,9 @@ arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo& input,
                                                   const Pooling2dDescriptor& descriptor)
 {
     const arm_compute::TensorInfo aclInputInfo =
-        BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+        BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-        BuildArmComputeTensorInfo(output, descriptor.m_DataLayout.GetDataLayout());
+        BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);
 
@@ -36,7 +36,7 @@ NeonPooling2dWorkload::NeonPooling2dWorkload(
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index 091d092777..5d932cc991 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include
 #include
 #include
 #include
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 4b15c1da6d..0b9f8f70c8 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -10,6 +10,8 @@
 
 #include
 
+#include
+
 #include
 #include
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
index 92b229d3bb..3da88515a2 100644
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ b/src/backends/reference/workloads/ResizeBilinear.hpp
@@ -7,6 +7,8 @@
 
 #include
 
+#include
+
 namespace armnn
 {
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index 48c212764f..6d0d004336 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -5,6 +5,8 @@
 
 #include "SpaceToBatchNd.hpp"
 
+#include
+
 namespace armnn
 {
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 53504d6365..5593ba6a5e 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -7,6 +7,8 @@
 
 #include
 
+#include
+
 #include
 
 namespace armnn
-- 
cgit v1.2.1
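
Usage note (not part of the patch): after this change, descriptors expose a plain armnn::DataLayout, and only backend-side code builds an armnn::DataLayoutIndexed locally when it needs the C/H/W dimension positions, as the Pooling2dTestImpl.hpp hunk above does with dimensionIndices. The C++ sketch below illustrates that pattern under stated assumptions: the include path backendsCommon/DataLayoutIndexed.hpp is assumed from where this patch adds the helper, and GetChannelCount is a hypothetical example function, not something introduced by the patch.

#include <armnn/Tensor.hpp>                      // armnn::TensorShape
#include <armnn/Types.hpp>                       // armnn::DataLayout (stays in the public API)
#include <backendsCommon/DataLayoutIndexed.hpp>  // armnn::DataLayoutIndexed (assumed path, backend-internal)

namespace example
{

// Hypothetical helper: a descriptor now carries only a DataLayout, so code that
// needs dimension indices converts it to a DataLayoutIndexed on the spot.
unsigned int GetChannelCount(const armnn::TensorShape& shape, armnn::DataLayout layout)
{
    const armnn::DataLayoutIndexed indexed(layout); // NCHW puts channels at index 1, NHWC at index 3
    return shape[indexed.GetChannelsIndex()];       // read the channel dimension of the shape
}

} // namespace example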