Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.cpp                       12
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp           3
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp            1
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp   1
-rw-r--r--  src/armnn/layers/Pooling2dLayer.cpp                8
-rw-r--r--  src/armnn/layers/ResizeBilinearLayer.cpp           5
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp           6
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                 19
-rw-r--r--  src/armnn/test/TensorHelpers.hpp                   6
9 files changed, 29 insertions, 32 deletions
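The pattern this change converges on: descriptors and test helpers keep a plain armnn::DataLayout, and a backendsCommon DataLayoutIndexed is constructed locally only where the N/C/H/W dimension indices are needed. A minimal self-contained sketch of that mapping follows; the stand-in class only mirrors the accessors used in the hunks below, while the real header is backendsCommon/DataLayoutIndexed.hpp, which lies outside this src/armnn-only diffstat.

#include <cstdio>

// Stand-ins for illustration only; the real definitions live in
// include/armnn/Types.hpp and backendsCommon/DataLayoutIndexed.hpp.
enum class DataLayout { NCHW, NHWC };

class DataLayoutIndexed
{
public:
    DataLayoutIndexed(DataLayout layout) : m_DataLayout(layout) {} // implicit, as used below

    DataLayout   GetDataLayout()    const { return m_DataLayout; }
    unsigned int GetChannelsIndex() const { return m_DataLayout == DataLayout::NCHW ? 1 : 3; }
    unsigned int GetHeightIndex()   const { return m_DataLayout == DataLayout::NCHW ? 2 : 1; }
    unsigned int GetWidthIndex()    const { return m_DataLayout == DataLayout::NCHW ? 3 : 2; }

private:
    DataLayout m_DataLayout;
};

int main()
{
    // Same shape-indexing pattern as Pooling2dLayer::InferOutputShapes after the change:
    // convert the layout once, then read dimension indices through the wrapper.
    const unsigned int inputShape[4] = { 2, 3, 4, 5 };           // hypothetical NCHW shape
    const DataLayoutIndexed dimensionIndices = DataLayout::NCHW;

    std::printf("C=%u H=%u W=%u\n",
                inputShape[dimensionIndices.GetChannelsIndex()],
                inputShape[dimensionIndices.GetHeightIndex()],
                inputShape[dimensionIndices.GetWidthIndex()]);
    return 0;
}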
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index dade1f70c3..f37b1a0c66 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -51,16 +51,4 @@ char const* GetLayerTypeAsCString(LayerType type)
}
}
-// Definition in include/armnn/Types.hpp
-bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed)
-{
- return dataLayout == indexed.GetDataLayout();
-}
-
-// Definition in include/armnn/Types.hpp
-bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout)
-{
- return indexed.GetDataLayout() == dataLayout;
-}
-
}
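The two operator== overloads deleted here compare a DataLayout with a DataLayoutIndexed; the test hunks further down still write m_Parameters.m_DataLayout == dataLayout, so equivalent overloads presumably move next to DataLayoutIndexed itself. A hedged sketch of such a translation unit, assuming the signatures stay as in the deleted code (the actual destination is outside this src/armnn-only diffstat):

#include <armnn/Types.hpp>
#include <backendsCommon/DataLayoutIndexed.hpp>

namespace armnn
{

// Mirrors the definitions removed from InternalTypes.cpp; their new location is an assumption.
bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed)
{
    return dataLayout == indexed.GetDataLayout();
}

bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout)
{
    return indexed.GetDataLayout() == dataLayout;
}

} // namespace armnn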
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index aff818e664..e1b78b21df 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -10,6 +10,7 @@
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -50,7 +51,7 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- const DataLayoutIndexed & dataLayout = m_Param.m_DataLayout;
+ const DataLayoutIndexed dataLayout = m_Param.m_DataLayout;
const TensorShape& inputShape = inputShapes[0];
unsigned int inBatchSize = inputShape[0];
unsigned int channelSize = inputShape[dataLayout.GetChannelsIndex()];
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index f3597e2914..4d3553f0cc 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -8,6 +8,7 @@
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index f356e39335..6ad32a756a 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -8,6 +8,7 @@
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 821c011cd0..24b7a69c49 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -7,6 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -33,13 +34,14 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
{
BOOST_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
+ const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
// If we support multiple batch dimensions in the future, then this assert will need to change.
BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
- unsigned int inWidth = inputShape[m_Param.m_DataLayout.GetWidthIndex()];
- unsigned int inHeight = inputShape[m_Param.m_DataLayout.GetHeightIndex()];
- unsigned int inChannels = inputShape[m_Param.m_DataLayout.GetChannelsIndex()];
+ unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
+ unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
+ unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
unsigned int inBatchSize = inputShape[0];
bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0);
diff --git a/src/armnn/layers/ResizeBilinearLayer.cpp b/src/armnn/layers/ResizeBilinearLayer.cpp
index 69ce69eea5..f72ccfce90 100644
--- a/src/armnn/layers/ResizeBilinearLayer.cpp
+++ b/src/armnn/layers/ResizeBilinearLayer.cpp
@@ -7,6 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -34,10 +35,10 @@ std::vector<TensorShape> ResizeBilinearLayer::InferOutputShapes(const std::vecto
{
BOOST_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
-
+ const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
unsigned int outWidth = m_Param.m_TargetWidth;
unsigned int outHeight = m_Param.m_TargetHeight;
- unsigned int outChannels = inputShape[m_Param.m_DataLayout.GetChannelsIndex()];
+ unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
unsigned int outBatch = inputShape[0];
TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index cc93886e50..658945ef08 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -9,6 +9,7 @@
#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -48,8 +49,9 @@ std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vecto
1U,
std::multiplies<>());
- unsigned int heightIndex = m_Param.m_DataLayout.GetHeightIndex();
- unsigned int widthIndex = m_Param.m_DataLayout.GetWidthIndex();
+ DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
+ unsigned int heightIndex = dimensionIndices.GetHeightIndex();
+ unsigned int widthIndex = dimensionIndices.GetWidthIndex();
std::pair<unsigned int, unsigned int> heightPad = m_Param.m_PadList[0];
std::pair<unsigned int, unsigned int> widthPad = m_Param.m_PadList[1];
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 1a9bd56ac5..b8ba72f271 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -8,6 +8,7 @@
#include <boost/cast.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -133,11 +134,11 @@ std::unique_ptr<WorkloadType> CreateArithmeticWorkloadTest(armnn::IWorkloadFacto
template <typename BatchNormalizationFloat32Workload, armnn::DataType DataType>
std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest(
- armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayoutIndexed dataLayout = DataLayout::NCHW)
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
TensorShape tensorShape;
- switch (dataLayout.GetDataLayout())
+ switch (dataLayout)
{
case DataLayout::NHWC:
tensorShape = { 2, 4, 4, 3 };
@@ -184,7 +185,7 @@ std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkl
BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));
+ BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -842,13 +843,12 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
template <typename ResizeBilinearWorkload, armnn::DataType DataType>
std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph,
- DataLayoutIndexed dataLayout =
- DataLayout::NCHW)
+ DataLayout dataLayout = DataLayout::NCHW)
{
TensorShape inputShape;
TensorShape outputShape;
- switch (dataLayout.GetDataLayout()) {
+ switch (dataLayout) {
case DataLayout::NHWC:
inputShape = { 2, 4, 4, 3 };
outputShape = { 2, 2, 2, 3 };
@@ -861,8 +861,9 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
// Creates the layer we're testing.
ResizeBilinearDescriptor resizeDesc;
- resizeDesc.m_TargetWidth = outputShape[dataLayout.GetWidthIndex()];
- resizeDesc.m_TargetHeight = outputShape[dataLayout.GetHeightIndex()];
+ DataLayoutIndexed dimensionIndices = dataLayout;
+ resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
+ resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
resizeDesc.m_DataLayout = dataLayout;
Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
@@ -883,7 +884,7 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));
+ BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
// Returns so we can do extra, backend-specific tests.
return workload;
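Illustrative call site for the updated helper; HypotheticalResizeBilinearWorkload and HypotheticalWorkloadFactory stand in for a concrete backend's types, which are not part of this diff:

BOOST_AUTO_TEST_CASE(CreateResizeBilinearNhwcExample) // illustrative test name
{
    armnn::Graph graph;
    HypotheticalWorkloadFactory factory; // placeholder for a concrete backend factory

    // NCHW remains the default; NHWC is now passed as a plain armnn::DataLayout.
    auto workload = CreateResizeBilinearWorkloadTest<HypotheticalResizeBilinearWorkload,
                                                     armnn::DataType::Float32>(
                        factory, graph, armnn::DataLayout::NHWC);
    BOOST_TEST(workload != nullptr);
}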
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 4501c3532c..238232ae96 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -216,9 +216,9 @@ armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
unsigned int numberOfChannels,
unsigned int height,
unsigned int width,
- const armnn::DataLayoutIndexed& dataLayout)
+ const armnn::DataLayout dataLayout)
{
- switch (dataLayout.GetDataLayout())
+ switch (dataLayout)
{
case armnn::DataLayout::NCHW:
return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
@@ -226,6 +226,6 @@ armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
default:
throw armnn::InvalidArgumentException("unknown data layout ["
- + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
+ + std::to_string(static_cast<int>(dataLayout)) + "]");
}
}
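A short usage sketch of the updated GetTensorInfo helper, as a test might call it after this change; the Float32 instantiation and the wrapper function are illustrative:

#include "TensorHelpers.hpp"

void ExampleGetTensorInfoUsage()
{
    // The helper now takes a plain armnn::DataLayout rather than a DataLayoutIndexed.
    armnn::TensorInfo nchwInfo = GetTensorInfo<float>(2, 3, 4, 5, armnn::DataLayout::NCHW); // shape {2,3,4,5}
    armnn::TensorInfo nhwcInfo = GetTensorInfo<float>(2, 3, 4, 5, armnn::DataLayout::NHWC); // shape {2,4,5,3}
    (void)nchwInfo;
    (void)nhwcInfo;
}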