about summary refs log tree commit diff
path: root/src/backends/reference
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp          |  2
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp          | 10
-rw-r--r--  src/backends/reference/workloads/Pad.cpp               | 44
-rw-r--r--  src/backends/reference/workloads/Pad.hpp               |  9
-rw-r--r--  src/backends/reference/workloads/RefPadWorkload.cpp    | 17
-rw-r--r--  src/backends/reference/workloads/RefPadWorkload.hpp    | 25
-rw-r--r--  src/backends/reference/workloads/RefPermuteWorkload.hpp|  2
7 files changed, 67 insertions, 42 deletions
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 4d157d4f8b..b1f9d6c70a 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -249,7 +249,7 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean(
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<RefPadWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<RefPadFloat32Workload, RefPadUint8Workload>(descriptor, info);
}
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 259739ba55..9f044cdbaf 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -213,9 +213,13 @@ ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest)
// Pad
-ARMNN_AUTO_TEST_CASE(Pad2d, Pad2dTest)
-ARMNN_AUTO_TEST_CASE(Pad3d, Pad3dTest)
-ARMNN_AUTO_TEST_CASE(Pad4d, Pad4dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat324d, PadFloat324dTest)
+
+ARMNN_AUTO_TEST_CASE(PadUint82d, PadUint82dTest)
+ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
+ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest)
ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest)
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 5c859317dd..a50fa23c6c 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -5,24 +5,22 @@
#include "Pad.hpp"
#include "backends/WorkloadData.hpp"
-
#include <boost/numeric/conversion/cast.hpp>
#include "TensorBufferArrayView.hpp"
-
#include <cmath>
#include <cstddef>
#include <functional>
#include <limits>
#include <cassert>
-
namespace armnn
{
+template <typename T>
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const float* inputData,
- float* outData)
+ const T* inputData,
+ T* outData)
{
unsigned int numOutputElements = outputInfo.GetNumElements();
@@ -30,10 +28,12 @@ void Pad(const TensorInfo& inputInfo,
TensorShape inputShape = inputInfo.GetShape();
unsigned int numInputDimensions = inputShape.GetNumDimensions();
+
#ifndef NDEBUG
- unsigned int numOutputDimensions = outputShape.GetNumDimensions();
+ unsigned int numOutputDimensions = outputShape.GetNumDimensions();
assert(numInputDimensions == numOutputDimensions);
+
#endif
unsigned int inputBatches = 0;
@@ -51,29 +51,27 @@ void Pad(const TensorInfo& inputInfo,
}
switch(numInputDimensions) {
+
case 1:
inputWidth = inputShape[0];
for (unsigned int w = 0; w < inputWidth ; w++)
{
-
outData[w+std::get<0>(m_PadList[0])] = inputData[w];
-
}
break;
+
case 2 :
inputHeight = inputShape[0];
inputWidth = inputShape[1];
-
outputHeight = outputShape[0];
outputWidth = outputShape[1];
for (unsigned int h = 0; h < inputHeight; h++)
{
-
for (unsigned int w = 0; w < inputWidth ; w++)
{
outData[(h+std::get<0>(m_PadList[0]))*outputWidth
@@ -82,25 +80,22 @@ void Pad(const TensorInfo& inputInfo,
}
break;
+
case 3 :
inputChannels = inputShape[0];
inputHeight = inputShape[1];
inputWidth = inputShape[2];
-
outputChannels = outputShape[0];
outputHeight = outputShape[1];
outputWidth = outputShape[2];
for (unsigned int c = 0; c < inputChannels; c++)
{
-
for (unsigned int h = 0; h < inputHeight; h++)
{
-
for (unsigned int w = 0; w < inputWidth ; w++)
{
-
outData[(c+std::get<0>(m_PadList[0]))*outputHeight*outputWidth
+ (h+std::get<0>(m_PadList[1]))*outputWidth
+ (w+std::get<0>(m_PadList[2]))] = inputData[c * inputHeight * inputWidth
@@ -111,13 +106,13 @@ void Pad(const TensorInfo& inputInfo,
}
break;
+
case 4 :
inputBatches = inputShape[0];
inputChannels = inputShape[1];
inputHeight = inputShape[2];
inputWidth = inputShape[3];
-
outputChannels = outputShape[1];
outputHeight = outputShape[2];
outputWidth = outputShape[3];
@@ -126,13 +121,10 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int c = 0; c < inputChannels; c++)
{
-
for (unsigned int h = 0; h < inputHeight; h++)
{
-
for (unsigned int w = 0; w < inputWidth ; w++)
{
-
outData[(b+std::get<0>(m_PadList[0])) * outputChannels * outputHeight * outputWidth
+ (c+std::get<0>(m_PadList[1])) * outputHeight * outputWidth
+ (h+std::get<0>(m_PadList[2])) * outputWidth
@@ -141,7 +133,6 @@ void Pad(const TensorInfo& inputInfo,
+ c * inputHeight * inputWidth
+ h * inputWidth
+ w];
-
}
}
}
@@ -150,9 +141,20 @@ void Pad(const TensorInfo& inputInfo,
break;
default :
+
break;
}
-
}
-} //namespace armnn
+template void Pad<float>(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ const float* inputData,
+ float* outData);
+template void Pad<uint8_t>(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ const uint8_t* inputData,
+ uint8_t* outData);
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Pad.hpp b/src/backends/reference/workloads/Pad.hpp
index ed80ef8eb0..42318d6fcf 100644
--- a/src/backends/reference/workloads/Pad.hpp
+++ b/src/backends/reference/workloads/Pad.hpp
@@ -12,9 +12,10 @@
namespace armnn
{
+template <typename T>
void Pad(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const float* inputData,
- float* outData);
+ const TensorInfo& outputInfo,
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ const T* inputData,
+ T* outData);
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 233fbe4f34..b41c2de9af 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -10,28 +10,31 @@
#include "Profiling.hpp"
+#include "TypeUtils.hpp"
+
#include <vector>
namespace armnn
{
-RefPadWorkload::RefPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info)
- :BaseWorkload<PadQueueDescriptor>(descriptor, info) {}
-
-
-void RefPadWorkload::Execute() const
+template <armnn::DataType DataType>
+void RefPadWorkload<DataType>::Execute() const
{
+ using T = ResolveType<DataType>;
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPadWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- const float* inputData = GetInputTensorDataFloat(0, m_Data);
- float* outputData = GetOutputTensorDataFloat(0, m_Data);
+ const T* inputData = GetInputTensorData<T>(0, m_Data);
+ T* outputData = GetOutputTensorData<T>(0, m_Data);
Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData);
}
+template class RefPadWorkload<DataType::Float32>;
+template class RefPadWorkload<DataType::QuantisedAsymm8>;
+
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index 7ff117d6a5..938fcf2004 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -5,17 +5,32 @@
#pragma once
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
+#include <backends/Workload.hpp>
+#include <backends/WorkloadData.hpp>
+
+#include <armnn/TypesUtils.hpp>
namespace armnn
{
-class RefPadWorkload : public BaseWorkload<PadQueueDescriptor>
+template <armnn::DataType DataType>
+class RefPadWorkload : public TypedWorkload<PadQueueDescriptor, DataType>
{
public:
- explicit RefPadWorkload (const PadQueueDescriptor& descriptor, const WorkloadInfo& info);
- virtual void Execute() const override;
+
+ static const std::string& GetName()
+ {
+ static const std::string name = std::string("RefPad") + GetDataTypeName(DataType) + "Workload";
+ return name;
+ }
+
+ using TypedWorkload<PadQueueDescriptor, DataType>::m_Data;
+ using TypedWorkload<PadQueueDescriptor, DataType>::TypedWorkload;
+
+ void Execute() const override;
};
+using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
+using RefPadUint8Workload = RefPadWorkload<DataType::QuantisedAsymm8>;
+
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index 841a080dfd..50caa3e9ec 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -31,4 +31,4 @@ using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
using RefPermuteUint8Workload = RefPermuteWorkload<DataType::QuantisedAsymm8>;
-} //namespace armnn
+} //namespace armnn
\ No newline at end of file