path: root/src/backends/reference/workloads
author    James Conroy <james.conroy@arm.com>  2018-10-11 12:39:05 +0100
committer Matthew Bentham <matthew.bentham@arm.com>  2018-10-22 16:57:53 +0100
commit    595408218a0e17f04d91ff131a8227a4f352ff61 (patch)
tree      515316e28abbed3dce388bc99be5ff52bc042765 /src/backends/reference/workloads
parent    a0944791e87902b35e06c306c7b1a6f0f5bbfbd7 (diff)
download  armnn-595408218a0e17f04d91ff131a8227a4f352ff61.tar.gz
IVGCVSW-1978: Support NHWC for ResizeBilinear CpuRef
* Adds implementation to plumb DataLayout parameter for ResizeBilinear on CpuRef.
* Adds unit tests to execute ResizeBilinear on CpuRef using the NHWC data layout.
* Adds DataLayoutIndexed API, allowing easy access to the Channels, Height and Width of a tensor based on its data layout. This reduces code duplication.
* Refactors original ResizeBilinear implementation and tests to use the DataLayoutIndexed API when required.

Change-Id: Ic2b8916cdd2e370d070175547079d774daf6d7bf
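For orientation, a minimal, self-contained sketch of the dimension-lookup pattern that DataLayoutIndexed provides; the getter names mirror those used in the hunks below, while the enum values and class body here are purely illustrative and are not the Arm NN headers:

#include <iostream>

// Illustrative stand-in for the DataLayoutIndexed idea: map the logical
// N/C/H/W dimensions onto shape indices for a given memory layout.
enum class DataLayout { NCHW, NHWC };

class DataLayoutIndexed
{
public:
    DataLayoutIndexed(DataLayout layout) : m_Layout(layout) {}

    unsigned int GetChannelsIndex() const { return m_Layout == DataLayout::NCHW ? 1u : 3u; }
    unsigned int GetHeightIndex()   const { return m_Layout == DataLayout::NCHW ? 2u : 1u; }
    unsigned int GetWidthIndex()    const { return m_Layout == DataLayout::NCHW ? 3u : 2u; }

private:
    DataLayout m_Layout;
};

int main()
{
    DataLayoutIndexed nhwc(DataLayout::NHWC);
    // For an NHWC tensor the channel dimension is the last one.
    std::cout << "NHWC channels index: " << nhwc.GetChannelsIndex() << std::endl; // prints 3
    return 0;
}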
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r--  src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp | 3
-rw-r--r--  src/backends/reference/workloads/ResizeBilinear.cpp | 20
-rw-r--r--  src/backends/reference/workloads/ResizeBilinear.hpp | 6
-rw-r--r--  src/backends/reference/workloads/TensorBufferArrayView.hpp | 21
4 files changed, 32 insertions, 18 deletions
diff --git a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp b/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
index 50ee7a218a..8d86bdcf34 100644
--- a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
@@ -23,7 +23,8 @@ void RefResizeBilinearFloat32Workload::Execute() const
ResizeBilinear(GetInputTensorDataFloat(0, m_Data),
inputInfo,
GetOutputTensorDataFloat(0, m_Data),
- outputInfo);
+ outputInfo,
+ m_Data.m_Parameters.m_DataLayout);
}
} //namespace armnn
diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/ResizeBilinear.cpp
index 0bce3c7ed8..e098c6c20d 100644
--- a/src/backends/reference/workloads/ResizeBilinear.cpp
+++ b/src/backends/reference/workloads/ResizeBilinear.cpp
@@ -25,27 +25,31 @@ inline float Lerp(float a, float b, float w)
}
-void ResizeBilinear(const float* in, const TensorInfo& inputInfo, float* out, const TensorInfo& outputInfo)
+void ResizeBilinear(const float* in,
+ const TensorInfo& inputInfo,
+ float* out,
+ const TensorInfo& outputInfo,
+ DataLayoutIndexed dataLayout)
{
// We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
// image is projected into the input image to figure out the interpolants and weights. Note that this
// will yield different results than if projecting the centre of output texels.
const unsigned int batchSize = inputInfo.GetShape()[0];
- const unsigned int channelCount = inputInfo.GetShape()[1];
+ const unsigned int channelCount = inputInfo.GetShape()[dataLayout.GetChannelsIndex()];
- const unsigned int inputHeight = inputInfo.GetShape()[2];
- const unsigned int inputWidth = inputInfo.GetShape()[3];
- const unsigned int outputHeight = outputInfo.GetShape()[2];
- const unsigned int outputWidth = outputInfo.GetShape()[3];
+ const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
+ const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
+ const unsigned int outputHeight = outputInfo.GetShape()[dataLayout.GetHeightIndex()];
+ const unsigned int outputWidth = outputInfo.GetShape()[dataLayout.GetWidthIndex()];
// How much to scale pixel coordinates in the output image, to get the corresponding pixel coordinates
// in the input image.
const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
- TensorBufferArrayView<const float> input(inputInfo.GetShape(), in);
- TensorBufferArrayView<float> output(outputInfo.GetShape(), out);
+ TensorBufferArrayView<const float> input(inputInfo.GetShape(), in, dataLayout);
+ TensorBufferArrayView<float> output(outputInfo.GetShape(), out, dataLayout);
for (unsigned int n = 0; n < batchSize; ++n)
{
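To make the projection convention described in the comment above concrete, here is a self-contained sketch (not the Arm NN implementation; sizes and values are made up) that derives one output pixel's source coordinates and blends the four neighbouring input texels using the same Lerp helper shown in the hunk header:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Linear interpolation between a and b with weight w, as in ResizeBilinear.cpp.
inline float Lerp(float a, float b, float w) { return w * b + (1.0f - w) * a; }

int main()
{
    const unsigned int inputW = 4, inputH = 4, outputW = 8, outputH = 8;
    const float scaleX = static_cast<float>(inputW) / static_cast<float>(outputW); // 0.5
    const float scaleY = static_cast<float>(inputH) / static_cast<float>(outputH); // 0.5

    // Project the top-left corner of output pixel (x, y) back into the input image.
    const unsigned int x = 3, y = 5;
    const float ix = x * scaleX; // 1.5
    const float iy = y * scaleY; // 2.5

    const unsigned int x0 = static_cast<unsigned int>(std::floor(ix));
    const unsigned int y0 = static_cast<unsigned int>(std::floor(iy));
    const unsigned int x1 = std::min(x0 + 1, inputW - 1);
    const unsigned int y1 = std::min(y0 + 1, inputH - 1);
    const float xw = ix - x0; // fractional weights
    const float yw = iy - y0;

    // Blend the four surrounding input texels: horizontally, then vertically.
    const float in[4][4] = {{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}};
    const float top    = Lerp(in[y0][x0], in[y0][x1], xw);
    const float bottom = Lerp(in[y1][x0], in[y1][x1], xw);
    std::printf("output(%u,%u) = %f\n", x, y, Lerp(top, bottom, yw)); // 11.5
    return 0;
}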
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
index 847b8e8bef..92b229d3bb 100644
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ b/src/backends/reference/workloads/ResizeBilinear.hpp
@@ -10,6 +10,10 @@
namespace armnn
{
-void ResizeBilinear(const float* in, const TensorInfo& inputInfo, float* out, const TensorInfo& outputInfo);
+void ResizeBilinear(const float* in,
+ const TensorInfo& inputInfo,
+ float* out,
+ const TensorInfo& outputInfo,
+ DataLayoutIndexed dataLayout = DataLayout::NCHW);
} //namespace armnn
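With the new signature, existing callers compile unchanged because the layout parameter defaults to NCHW, while NHWC callers pass the layout explicitly. An illustrative call-site sketch only; the buffers and TensorInfos are assumed to be set up elsewhere:

// Illustrative only: in/out are float buffers, inputInfo/outputInfo their TensorInfos.
ResizeBilinear(in, inputInfo, out, outputInfo);                          // defaults to DataLayout::NCHW
ResizeBilinear(in, inputInfo, out, outputInfo, armnn::DataLayout::NHWC); // NHWC path added by this change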
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index e19810ca87..aba44e4593 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -15,28 +15,33 @@ template <typename DataType>
class TensorBufferArrayView
{
public:
- TensorBufferArrayView(const TensorShape& shape, DataType* data)
+ TensorBufferArrayView(const TensorShape& shape, DataType* data, DataLayoutIndexed dataLayout = DataLayout::NCHW)
: m_Shape(shape)
, m_Data(data)
+ , m_DataLayout(dataLayout)
{
}
DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
{
- BOOST_ASSERT( b < m_Shape[0] || (m_Shape[0] == 0 && b == 0) );
- BOOST_ASSERT( c < m_Shape[1] || (m_Shape[1] == 0 && c == 0) );
- BOOST_ASSERT( h < m_Shape[2] || (m_Shape[2] == 0 && h == 0) );
- BOOST_ASSERT( w < m_Shape[3] || (m_Shape[3] == 0 && w == 0) );
+ BOOST_ASSERT( b < m_Shape[0] || ( m_Shape[0] == 0 && b == 0 ) );
+ BOOST_ASSERT( c < m_Shape[m_DataLayout.GetChannelsIndex()] ||
+ ( m_Shape[m_DataLayout.GetChannelsIndex()] == 0 && c == 0) );
+ BOOST_ASSERT( h < m_Shape[m_DataLayout.GetHeightIndex()] ||
+ ( m_Shape[m_DataLayout.GetHeightIndex()] == 0 && h == 0) );
+ BOOST_ASSERT( w < m_Shape[m_DataLayout.GetWidthIndex()] ||
+ ( m_Shape[m_DataLayout.GetWidthIndex()] == 0 && w == 0) );
return m_Data[b * m_Shape[1] * m_Shape[2] * m_Shape[3]
- + c * m_Shape[2] * m_Shape[3]
- + h * m_Shape[3]
+ + c * m_Shape[m_DataLayout.GetHeightIndex()] * m_Shape[m_DataLayout.GetWidthIndex()]
+ + h * m_Shape[m_DataLayout.GetWidthIndex()]
+ w];
}
private:
const TensorShape m_Shape;
- DataType* m_Data;
+ DataType* m_Data;
+ DataLayoutIndexed m_DataLayout;
};
} //namespace armnn
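For reference, a self-contained sketch, independent of the class above, of how a logical (b, c, h, w) lookup maps to a flat offset when the shape is stored in NCHW versus NHWC memory order; the same logical element generally lands at a different offset in each layout:

#include <cstdio>

enum class Layout { NCHW, NHWC };

// Illustrative offset computation for a contiguous 4-D tensor.
// shape[] is given in memory order; b/c/h/w are logical coordinates.
unsigned int Offset(const unsigned int shape[4], Layout layout,
                    unsigned int b, unsigned int c, unsigned int h, unsigned int w)
{
    if (layout == Layout::NCHW)
    {
        // shape = {N, C, H, W}
        return ((b * shape[1] + c) * shape[2] + h) * shape[3] + w;
    }
    // shape = {N, H, W, C}
    return ((b * shape[1] + h) * shape[2] + w) * shape[3] + c;
}

int main()
{
    const unsigned int nchw[4] = {1, 2, 3, 4}; // N=1, C=2, H=3, W=4
    const unsigned int nhwc[4] = {1, 3, 4, 2}; // N=1, H=3, W=4, C=2
    // The element at b=0, c=0, h=2, w=3 sits at different flat offsets.
    std::printf("NCHW offset: %u\n", Offset(nchw, Layout::NCHW, 0, 0, 2, 3)); // 11
    std::printf("NHWC offset: %u\n", Offset(nhwc, Layout::NHWC, 0, 0, 2, 3)); // 22
    return 0;
}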