Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                            |  11
-rw-r--r--  src/backends/reference/workloads/ArgMinMax.cpp                        |   2
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp                     |   5
-rw-r--r--  src/backends/reference/workloads/BatchNormImpl.cpp                    |   2
-rw-r--r--  src/backends/reference/workloads/BatchToSpaceNd.hpp                   |  11
-rw-r--r--  src/backends/reference/workloads/ConvImpl.hpp                         |   4
-rw-r--r--  src/backends/reference/workloads/Decoders.hpp                         |   5
-rw-r--r--  src/backends/reference/workloads/DepthToSpace.cpp                     |   4
-rw-r--r--  src/backends/reference/workloads/Encoders.hpp                         |   3
-rw-r--r--  src/backends/reference/workloads/InstanceNorm.cpp                     |   2
-rw-r--r--  src/backends/reference/workloads/LogSoftmax.cpp                       |   2
-rw-r--r--  src/backends/reference/workloads/Pooling2d.cpp                        |   3
-rw-r--r--  src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp     |   4
-rw-r--r--  src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp     |   6
-rw-r--r--  src/backends/reference/workloads/RefL2NormalizationWorkload.cpp       | 108
-rw-r--r--  src/backends/reference/workloads/RefNormalizationWorkload.cpp         |   4
-rw-r--r--  src/backends/reference/workloads/RefPermuteWorkload.cpp               |   3
-rw-r--r--  src/backends/reference/workloads/Resize.hpp                           |   5
-rw-r--r--  src/backends/reference/workloads/Softmax.cpp                          |   2
-rw-r--r--  src/backends/reference/workloads/SpaceToBatchNd.cpp                   |   2
-rw-r--r--  src/backends/reference/workloads/SpaceToDepth.cpp                     |   2
-rw-r--r--  src/backends/reference/workloads/TensorBufferArrayView.hpp            |   4
-rw-r--r--  src/backends/reference/workloads/TransposeConvolution2d.cpp           |   2
23 files changed, 102 insertions, 94 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 5a84d8ac78..05684dcbc0 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -6,17 +6,20 @@
#include "RefLayerSupport.hpp"
#include "RefBackendId.hpp"
-#include <DataLayoutIndexed.hpp>
-#include <InternalTypes.hpp>
-#include <LayerSupportCommon.hpp>
-
#include <armnn/Types.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/BackendRegistry.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
+#include <InternalTypes.hpp>
+#include <LayerSupportCommon.hpp>
+
#include <backendsCommon/LayerSupportRules.hpp>
+
#include <backendsCommon/test/WorkloadTestUtils.hpp>
+#include <boost/cast.hpp>
#include <boost/core/ignore_unused.hpp>
#include <vector>
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index 2687a4e8ac..76616f1465 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -5,7 +5,7 @@
#include "ArgMinMax.hpp"
-#include <TensorUtils.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <boost/numeric/conversion/cast.hpp>
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 95a31fbdd6..ca5110c2fd 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,9 +5,10 @@
#pragma once
-#include "FloatingPointConverter.hpp"
-
#include <armnn/ArmNN.hpp>
+
+#include <armnnUtils/FloatingPointConverter.hpp>
+
#include <ResolveType.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/BatchNormImpl.cpp b/src/backends/reference/workloads/BatchNormImpl.cpp
index b80af8c937..e742c7280f 100644
--- a/src/backends/reference/workloads/BatchNormImpl.cpp
+++ b/src/backends/reference/workloads/BatchNormImpl.cpp
@@ -8,7 +8,7 @@
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index b757d3709c..a375aaae52 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -5,15 +5,16 @@
#pragma once
-#include <armnn/Types.hpp>
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
#include "BaseIterator.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnn/Types.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
namespace armnn
{
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 7dba760d87..562fd3e296 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -13,13 +13,11 @@
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <DataLayoutIndexed.hpp>
-
#include <cmath>
#include <limits>
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index dcd498cb63..b9cd7f9573 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -6,8 +6,9 @@
#pragma once
#include "BaseIterator.hpp"
-#include "FloatingPointConverter.hpp"
-#include "TensorUtils.hpp"
+
+#include <armnnUtils/FloatingPointConverter.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index d500e9b100..91ca160ae2 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -5,8 +5,8 @@
#include "DepthToSpace.hpp"
-#include <DataLayoutIndexed.hpp>
-#include <Permute.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnnUtils/Permute.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 5c0cffa7ca..0d578d68de 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -6,7 +6,8 @@
#pragma once
#include "BaseIterator.hpp"
-#include "TensorUtils.hpp"
+
+#include <armnnUtils/TensorUtils.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/InstanceNorm.cpp b/src/backends/reference/workloads/InstanceNorm.cpp
index 9d6532fa6e..08c555f0e8 100644
--- a/src/backends/reference/workloads/InstanceNorm.cpp
+++ b/src/backends/reference/workloads/InstanceNorm.cpp
@@ -8,7 +8,7 @@
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 3fa3dc0d8c..ddf5674fb8 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -5,7 +5,7 @@
#include "LogSoftmax.hpp"
-#include <TensorUtils.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index cf83f8ce2b..ea8f4ee7e7 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -4,11 +4,12 @@
//
#include "Pooling2d.hpp"
-#include "DataLayoutIndexed.hpp"
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <limits>
diff --git a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
index 886e77a31b..ef813eb69b 100644
--- a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
@@ -4,9 +4,9 @@
//
#include "RefConvertFp16ToFp32Workload.hpp"
-
#include "RefWorkloadUtils.hpp"
-#include "FloatingPointConverter.hpp"
+
+#include <armnnUtils/FloatingPointConverter.hpp>
#include <Half.hpp>
diff --git a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
index 33270ad10f..559901f2f2 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
@@ -4,12 +4,12 @@
//
#include "RefConvertFp32ToFp16Workload.hpp"
-
-#include "FloatingPointConverter.hpp"
#include "RefWorkloadUtils.hpp"
#include "Profiling.hpp"
-#include "Half.hpp"
+#include <armnnUtils/FloatingPointConverter.hpp>
+
+#include <Half.hpp>
namespace armnn
{
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
index 3764b9a49a..6fec1abe6f 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
@@ -4,13 +4,13 @@
//
#include "RefL2NormalizationWorkload.hpp"
-
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
-#include "DataLayoutIndexed.hpp"
-#include "Profiling.hpp"
+#include <Profiling.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -21,80 +21,80 @@ using namespace armnnUtils;
namespace armnn
{
RefL2NormalizationWorkload::RefL2NormalizationWorkload(
- const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : BaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}
+ const L2NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}
- void RefL2NormalizationWorkload::Execute() const
- {
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
+void RefL2NormalizationWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
- const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- auto inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
- auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+ auto inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+ auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
- DataLayoutIndexed dataLayout(m_Data.m_Parameters.m_DataLayout);
+ DataLayoutIndexed dataLayout(m_Data.m_Parameters.m_DataLayout);
- const TensorShape& shape = inputInfo.GetShape();
- unsigned int paddedShapeArray[4];
- const int idxShift = 4 - boost::numeric_cast<int>(shape.GetNumDimensions());
+ const TensorShape& shape = inputInfo.GetShape();
+ unsigned int paddedShapeArray[4];
+ const int idxShift = 4 - boost::numeric_cast<int>(shape.GetNumDimensions());
- const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
- paddedShapeArray[0] = batches;
+ const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
+ paddedShapeArray[0] = batches;
- const int channelsIdx = boost::numeric_cast<int>(dataLayout.GetChannelsIndex());
- const unsigned int channels = (channelsIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(channelsIdx - idxShift)]
- : 1;
- paddedShapeArray[channelsIdx] = channels;
+ const int channelsIdx = boost::numeric_cast<int>(dataLayout.GetChannelsIndex());
+ const unsigned int channels = (channelsIdx - idxShift >= 0)
+ ? shape[boost::numeric_cast<unsigned int>(channelsIdx - idxShift)]
+ : 1;
+ paddedShapeArray[channelsIdx] = channels;
- const int heightIdx = boost::numeric_cast<int>(dataLayout.GetHeightIndex());
- const unsigned int height = (heightIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(heightIdx - idxShift)]
- : 1;
- paddedShapeArray[heightIdx] = height;
+ const int heightIdx = boost::numeric_cast<int>(dataLayout.GetHeightIndex());
+ const unsigned int height = (heightIdx - idxShift >= 0)
+ ? shape[boost::numeric_cast<unsigned int>(heightIdx - idxShift)]
+ : 1;
+ paddedShapeArray[heightIdx] = height;
- const int widthIdx = boost::numeric_cast<int>(dataLayout.GetWidthIndex());
- const unsigned int width = (widthIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(widthIdx - idxShift)]
- : 1;
- paddedShapeArray[widthIdx] = width;
+ const int widthIdx = boost::numeric_cast<int>(dataLayout.GetWidthIndex());
+ const unsigned int width = (widthIdx - idxShift >= 0)
+ ? shape[boost::numeric_cast<unsigned int>(widthIdx - idxShift)]
+ : 1;
+ paddedShapeArray[widthIdx] = width;
- const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);
+ const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);
- for (unsigned int n = 0; n < batches; ++n)
+ for (unsigned int n = 0; n < batches; ++n)
+ {
+ for (unsigned int c = 0; c < channels; ++c)
{
- for (unsigned int c = 0; c < channels; ++c)
+ for (unsigned int h = 0; h < height; ++h)
{
- for (unsigned int h = 0; h < height; ++h)
+ for (unsigned int w = 0; w < width; ++w)
{
- for (unsigned int w = 0; w < width; ++w)
+ float reduction = 0.0;
+ for (unsigned int d = 0; d < channels; ++d)
{
- float reduction = 0.0;
- for (unsigned int d = 0; d < channels; ++d)
- {
- unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);
+ unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);
- (*inputDecoder)[inputIndex];
- const float value = inputDecoder->Get();
- reduction += value * value;
- }
+ (*inputDecoder)[inputIndex];
+ const float value = inputDecoder->Get();
+ reduction += value * value;
+ }
- unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);
+ unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);
- float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;
+ float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;
- const float scale = 1.0f / sqrtf(maximum);
+ const float scale = 1.0f / sqrtf(maximum);
- (*inputDecoder)[index];
- (*outputEncoder)[index];
- outputEncoder->Set(inputDecoder->Get() * scale);
- }
+ (*inputDecoder)[index];
+ (*outputEncoder)[index];
+ outputEncoder->Set(inputDecoder->Get() * scale);
}
}
}
}
+}
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index 8ff2d9cf92..0427baf475 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -4,14 +4,14 @@
//
#include "RefNormalizationWorkload.hpp"
-
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
#include <Profiling.hpp>
#include <boost/log/trivial.hpp>
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index 4d43b7e560..4e7b76bf0a 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -6,7 +6,8 @@
#include "RefPermuteWorkload.hpp"
#include "RefWorkloadUtils.hpp"
-#include <Permute.hpp>
+#include <armnnUtils/Permute.hpp>
+
#include <ResolveType.hpp>
namespace armnn
diff --git a/src/backends/reference/workloads/Resize.hpp b/src/backends/reference/workloads/Resize.hpp
index 8bd8999e5d..4c357946d9 100644
--- a/src/backends/reference/workloads/Resize.hpp
+++ b/src/backends/reference/workloads/Resize.hpp
@@ -6,9 +6,10 @@
#pragma once
#include "BaseIterator.hpp"
+
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
namespace armnn
{
@@ -20,4 +21,4 @@ void Resize(Decoder<float>& in,
armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW,
ResizeMethod resizeMethod = ResizeMethod::NearestNeighbor);
-} //namespace armnn
+} // namespace armnn
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index f745d816c2..5036389a10 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -5,7 +5,7 @@
#include "Softmax.hpp"
-#include <TensorUtils.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <cmath>
#include <vector>
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index 0bc2396973..b6bab17367 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -5,7 +5,7 @@
#include "SpaceToBatchNd.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
using namespace armnnUtils;
diff --git a/src/backends/reference/workloads/SpaceToDepth.cpp b/src/backends/reference/workloads/SpaceToDepth.cpp
index 4a4f4183d9..604a9051af 100644
--- a/src/backends/reference/workloads/SpaceToDepth.cpp
+++ b/src/backends/reference/workloads/SpaceToDepth.cpp
@@ -5,7 +5,7 @@
#include "SpaceToDepth.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
using namespace armnnUtils;
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index c06407241d..e03c42fe60 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -7,9 +7,9 @@
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <boost/assert.hpp>
namespace armnn
{
diff --git a/src/backends/reference/workloads/TransposeConvolution2d.cpp b/src/backends/reference/workloads/TransposeConvolution2d.cpp
index 5662c58809..5698014181 100644
--- a/src/backends/reference/workloads/TransposeConvolution2d.cpp
+++ b/src/backends/reference/workloads/TransposeConvolution2d.cpp
@@ -5,7 +5,7 @@
#include "TransposeConvolution2d.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
namespace armnn
{