aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r--src/backends/reference/workloads/BatchToSpaceNd.cpp100
-rw-r--r--src/backends/reference/workloads/BatchToSpaceNd.hpp22
-rw-r--r--src/backends/reference/workloads/CMakeLists.txt6
-rw-r--r--src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp28
-rw-r--r--src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp22
-rw-r--r--src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp30
-rw-r--r--src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp23
-rw-r--r--src/backends/reference/workloads/RefWorkloads.hpp4
8 files changed, 234 insertions(+), 1 deletion(-)
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
new file mode 100644
index 0000000000..bedf8418ef
--- /dev/null
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -0,0 +1,100 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+
+#include "RefWorkloadUtils.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <boost/assert.hpp>
+
+namespace armnn
+{
+
+// Flattens a 4D (batch, height, width, channel) coordinate into a linear
+// element offset, honouring the tensor's data layout (NHWC or NCHW).
+inline unsigned int Offset(const TensorShape& shape, unsigned int batch, unsigned int height, unsigned int width,
+                           unsigned int channels, const DataLayoutIndexed& dataLayout)
+{
+    const unsigned int heightIndex   = dataLayout.GetHeightIndex();
+    const unsigned int widthIndex    = dataLayout.GetWidthIndex();
+    const unsigned int channelsIndex = dataLayout.GetChannelsIndex();
+
+    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    {
+        // NHWC: channels vary fastest.
+        unsigned int offset = batch;
+        offset = offset * shape[heightIndex] + height;
+        offset = offset * shape[widthIndex] + width;
+        offset = offset * shape[channelsIndex] + channels;
+        return offset;
+    }
+
+    // NCHW: width varies fastest.
+    unsigned int offset = batch;
+    offset = offset * shape[channelsIndex] + channels;
+    offset = offset * shape[heightIndex] + height;
+    offset = offset * shape[widthIndex] + width;
+    return offset;
+}
+
+// Reference implementation of BatchToSpaceNd: moves data from the batch
+// dimension back into spatial blocks of the output, then applies cropping.
+// Throws InvalidArgumentException on non-4D tensors or malformed
+// blockShape/cropsData (which would otherwise be read out of bounds).
+void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+                    const TensorInfo& inputTensorInfo,
+                    const TensorInfo& outputTensorInfo,
+                    const std::vector<unsigned int>& blockShape,
+                    const std::vector<std::vector<unsigned int>>& cropsData,
+                    const float* inputData,
+                    float* outputData)
+{
+    TensorShape inputShape = inputTensorInfo.GetShape();
+    unsigned int inputNumDims = inputShape.GetNumDimensions();
+    if (inputNumDims != 4)
+    {
+        throw armnn::InvalidArgumentException("Expected Input with 4 Dimensions");
+    }
+
+    TensorShape outputShape = outputTensorInfo.GetShape();
+    unsigned int outputNumDims = outputShape.GetNumDimensions();
+    if (outputNumDims != 4)
+    {
+        throw armnn::InvalidArgumentException("Expected Output with 4 Dimensions");
+    }
+
+    // Guard the element accesses below; without this, malformed descriptors
+    // would index past the ends of blockShape/cropsData.
+    if (blockShape.size() < 2 || cropsData.size() < 2 ||
+        cropsData[0].empty() || cropsData[1].empty())
+    {
+        throw armnn::InvalidArgumentException("Expected 2D block shape and crops");
+    }
+
+    const unsigned int inputBatchSize = inputShape[0];
+    const unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
+    const unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
+    const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
+
+    const unsigned int outputBatchSize = outputShape[0];
+    const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
+
+    const unsigned int blockShapeHeight = blockShape[0];
+    const unsigned int blockShapeWidth = blockShape[1];
+
+    const unsigned int cropsTop = cropsData[0][0];
+    const unsigned int cropsLeft = cropsData[1][0];
+
+    for (unsigned int inBatch = 0; inBatch < inputBatchSize; ++inBatch)
+    {
+        // Each input batch maps to an output batch plus an offset that
+        // selects its position within the block grid.
+        const unsigned int outBatch = inBatch % outputBatchSize;
+        const unsigned int spatialOffset = inBatch / outputBatchSize;
+
+        for (unsigned int inH = 0; inH < inputHeight; ++inH)
+        {
+            // Unsigned wrap-around when the crop exceeds the coordinate is
+            // intentional: the wrapped value fails the bound check below.
+            const unsigned int outH = inH * blockShapeHeight + spatialOffset / blockShapeWidth - cropsTop;
+
+            if (outH >= outputHeight)
+            {
+                continue; // Cropped away.
+            }
+
+            for (unsigned int inW = 0; inW < inputWidth; ++inW)
+            {
+                const unsigned int outW = inW * blockShapeWidth + spatialOffset % blockShapeWidth - cropsLeft;
+
+                if (outW >= outputWidth)
+                {
+                    continue; // Cropped away.
+                }
+
+                for (unsigned int c = 0; c < channels; c++)
+                {
+                    unsigned int outOffset = Offset(outputShape, outBatch, outH, outW, c, dataLayout);
+                    unsigned int inOffset = Offset(inputShape, inBatch, inH, inW, c, dataLayout);
+                    outputData[outOffset] = inputData[inOffset];
+                }
+            }
+        }
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
new file mode 100644
index 0000000000..7923ceadd0
--- /dev/null
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+/// Reference implementation of the BatchToSpaceNd operation: rearranges data
+/// from the batch dimension into spatial blocks of the output, then crops.
+/// @param dataLayout       Maps logical H/W/C axes onto the tensor layout (NHWC/NCHW).
+/// @param inputTensorInfo  Shape/metadata of the 4D input tensor.
+/// @param outputTensorInfo Shape/metadata of the 4D output tensor.
+/// @param blockShape       Block sizes; element 0 is height, element 1 is width.
+/// @param cropsData        Per-dimension crops; [0][0] is top, [1][0] is left.
+/// @param inputData        Pointer to the float input buffer.
+/// @param outputData       Pointer to the float output buffer (written in place).
+void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+                    const TensorInfo& inputTensorInfo,
+                    const TensorInfo& outputTensorInfo,
+                    const std::vector<unsigned int>& blockShape,
+                    const std::vector<std::vector<unsigned int>>& cropsData,
+                    const float* inputData,
+                    float* outputData);
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 4cef2d0771..1c38509ca0 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -9,6 +9,8 @@ list(APPEND armnnRefBackendWorkloads_sources
ArithmeticFunction.cpp
ArithmeticFunction.hpp
BatchNormImpl.hpp
+ BatchToSpaceNd.cpp
+ BatchToSpaceNd.hpp
Broadcast.cpp
Broadcast.hpp
ConvImpl.cpp
@@ -32,6 +34,10 @@ list(APPEND armnnRefBackendWorkloads_sources
RefBatchNormalizationFloat32Workload.hpp
RefBatchNormalizationUint8Workload.cpp
RefBatchNormalizationUint8Workload.hpp
+ RefBatchToSpaceNdFloat32Workload.cpp
+ RefBatchToSpaceNdFloat32Workload.hpp
+ RefBatchToSpaceNdUint8Workload.cpp
+ RefBatchToSpaceNdUint8Workload.hpp
RefConstantFloat32Workload.cpp
RefConstantFloat32Workload.hpp
RefConstantUint8Workload.cpp
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
new file mode 100644
index 0000000000..bf246c272f
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+// Runs BatchToSpaceNd on float32 tensors: resolves the single input/output
+// pair from the queue descriptor and delegates to the shared reference kernel.
+void RefBatchToSpaceNdFloat32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdFloat32Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    // Float path needs no (de)quantization: read and write the buffers directly.
+    const float* inputData = GetInputTensorDataFloat(0, m_Data);
+    float* outputData = GetOutputTensorDataFloat(0, m_Data);
+
+    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+                   m_Data.m_Parameters.m_Crops, inputData, outputData);
+}
+
+
+} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
new file mode 100644
index 0000000000..4977772c82
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn {
+
+// Reference-backend workload executing BatchToSpaceNd on float32 tensors.
+class RefBatchToSpaceNdFloat32Workload : public Float32Workload<BatchToSpaceNdQueueDescriptor>
+{
+
+public:
+    // Inherit the base-class constructor (descriptor + workload info).
+    using Float32Workload<BatchToSpaceNdQueueDescriptor>::Float32Workload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
new file mode 100644
index 0000000000..a66bcd42de
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+// Runs BatchToSpaceNd on quantized uint8 tensors: dequantize the input to
+// float, run the shared reference kernel into a float scratch buffer, then
+// quantize the result back into the uint8 output tensor.
+void RefBatchToSpaceNdUint8Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdUint8Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    auto dequantizedInputData = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
+
+    // The kernel must write into the float scratch vector, not the raw output
+    // buffer: the output is uint8, so treating it as float* would corrupt it
+    // and leave `results` (the data Quantize reads) unwritten.
+    std::vector<float> results(outputInfo.GetNumElements());
+    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+                   m_Data.m_Parameters.m_Crops, dequantizedInputData.data(), results.data());
+
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
new file mode 100644
index 0000000000..1f221c2f47
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+// Reference-backend workload executing BatchToSpaceNd on quantized uint8
+// tensors (dequantize -> float kernel -> requantize).
+class RefBatchToSpaceNdUint8Workload : public Uint8Workload<BatchToSpaceNdQueueDescriptor>
+{
+
+public:
+    // Inherit the base-class constructor (descriptor + workload info).
+    using Uint8Workload<BatchToSpaceNdQueueDescriptor>::Uint8Workload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 03907a6b91..5ea7fe4b58 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -54,4 +54,6 @@
#include "RefConvertFp32ToFp16Workload.hpp"
#include "RefMeanUint8Workload.hpp"
#include "RefMeanFloat32Workload.hpp"
-#include "RefPadWorkload.hpp" \ No newline at end of file
+#include "RefPadWorkload.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"