aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/DepthToSpace.cpp
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-09-23 19:11:59 +0100
committerKevin May <kevin.may@arm.com>2019-09-24 14:30:22 +0000
commit73f66421c6719c007325b69d51ca73dd58eb8c33 (patch)
tree69a6311caa36255be5d3b55383a2067f9d4d96fb /src/backends/reference/workloads/DepthToSpace.cpp
parent1d18e6f74f7a76befe64fa34e7dbfaa8580875ee (diff)
downloadarmnn-73f66421c6719c007325b69d51ca73dd58eb8c33.tar.gz
IVGCVSW-3885 Add reference workload for DepthToSpace
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> Change-Id: Id937dc4425884ad1985dcdfaae8bf3fb64f0c766
Diffstat (limited to 'src/backends/reference/workloads/DepthToSpace.cpp')
-rw-r--r--src/backends/reference/workloads/DepthToSpace.cpp79
1 file changed, 79 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
new file mode 100644
index 0000000000..046bd47a6f
--- /dev/null
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -0,0 +1,79 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DepthToSpace.hpp"
+
+#include <DataLayoutIndexed.hpp>
+#include <Permute.hpp>
+
+#include <boost/assert.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+void DepthToSpace(const TensorInfo& inputInfo,
+ const DepthToSpaceDescriptor& descriptor,
+ const void* inputData,
+ void* outputData,
+ unsigned int dataTypeSize)
+{
+ const unsigned int blockSize = descriptor.m_BlockSize;
+ BOOST_ASSERT(blockSize != 0u);
+
+ const TensorShape& inputShape = inputInfo.GetShape();
+ const unsigned int batches = inputShape[0];
+
+ armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
+ const unsigned int inDepth = inputShape[dataLayoutIndexed.GetChannelsIndex()];
+ const unsigned int inHeight = inputShape[dataLayoutIndexed.GetHeightIndex()];
+ const unsigned int inWidth = inputShape[dataLayoutIndexed.GetWidthIndex()];
+
+ const unsigned int outDepth = inDepth / (blockSize * blockSize);
+
+ // The 4D input data can be interpreted as 6D (implicitly reshaped) as follows:
+ //
+ // [batch, block size, block size, inDepth, inHeight, inWidth] for NCHW and
+ // [batch, inHeight, inWidth, blockSize, blockSize, outDepth] for NHWC.
+ //
+ // DepthToSpace can then be implemented as a permutation in 6D resulting in
+ // the following shapes:
+ //
+ // [batch, outDepth, inHeight, blockSize, inWidth, blockSize] for NCHW and
+ // [batch, inHeight, blockSize, inWidth, blockSize, outDepth] for NHWC.
+ //
+ // NOTE:
+ // Since 6D tensors are not currently supported, in practice we need to handle each
+ // batch separately and execute 5D permutations
+
+ TensorShape permDestShape;
+ std::initializer_list<unsigned int> permVector;
+ if (descriptor.m_DataLayout == DataLayout::NCHW)
+ {
+ permDestShape = TensorShape({ outDepth, inHeight, blockSize, inWidth, blockSize });
+ permVector = { 2, 4, 0, 1, 3 };
+ }
+ else
+ {
+ permDestShape = TensorShape({ inHeight, blockSize, inWidth, blockSize, outDepth });
+ permVector = { 0, 2, 1, 3, 4 };
+ }
+
+ const unsigned int numElementsPerBatch = inputShape.GetNumElements() / batches;
+
+ for (unsigned int batchIndex = 0u; batchIndex < batches; ++batchIndex)
+ {
+ const uintptr_t batchDataOffset = batchIndex * (numElementsPerBatch * dataTypeSize);
+
+ armnnUtils::Permute(permDestShape,
+ PermutationVector(permVector),
+ static_cast<const void*>(reinterpret_cast<const uint8_t*>(inputData) + batchDataOffset),
+ static_cast<void*>(reinterpret_cast<uint8_t*>(outputData) + batchDataOffset),
+ dataTypeSize);
+ }
+}
+
+} // namespace armnn