about | summary | refs | log | tree | commit | diff
path: root/src
diff options
context:
space:
mode:
author:    Sadik Armagan <sadik.armagan@arm.com>  2020-06-04 10:32:18 +0100
committer: Sadik Armagan <sadik.armagan@arm.com>  2020-06-05 09:48:47 +0000
commit:    041b3c0d416d94b1aa4efa41960808b7e5e61f25 (patch)
tree:      029b7f3b072d94df8cc92e92ab0a0300130c7902 /src
parent:    fa0d838fd7be90b6e1d53aa62da14ccc0cf0eacc (diff)
download:  armnn-041b3c0d416d94b1aa4efa41960808b7e5e61f25.tar.gz
IVGCVSW-4904 Refactor CpuRef PAD Workload
* Refactored templated workload creation
* Added int8_t unit tests

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I018b7f9f20496b5f9d7445901fe0d3dd04199cd0
Diffstat (limited to 'src')
-rw-r--r--src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp28
-rw-r--r--src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp16
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp18
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp5
-rw-r--r--src/backends/reference/workloads/Pad.cpp184
-rw-r--r--src/backends/reference/workloads/Pad.hpp12
-rw-r--r--src/backends/reference/workloads/RefPadWorkload.cpp28
-rw-r--r--src/backends/reference/workloads/RefPadWorkload.hpp25
8 files changed, 152 insertions, 164 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 120572ce29..405f4743bc 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -525,3 +525,31 @@ LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(
{
return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
}
+
+LayerTestResult<int8_t, 2> PadInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad2dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, 1.0f, 0);
+}
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad2dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
+}
+
+LayerTestResult<int8_t, 3> PadInt83dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad3dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, 1.0f, 0);
+}
+
+LayerTestResult<int8_t, 4> PadInt84dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, 1.0f, 0);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
index 34aa6c66a3..7b36455063 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
@@ -83,3 +83,19 @@ LayerTestResult<armnn::BFloat16, 3> PadBFloat163dTest(
LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int8_t, 2> PadInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int8_t, 3> PadInt83dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int8_t, 4> PadInt84dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); \ No newline at end of file
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 4cd0b9e0e3..ba84de49da 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -456,23 +456,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDes
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsQSymmS16(info))
- {
- return std::make_unique<RefPadQSymm16Workload>(descriptor, info);
- }
- else if (IsFloat16(info))
- {
- return std::make_unique<RefPadFloat16Workload>(descriptor, info);
- }
- else if (IsBFloat16(info))
- {
- return std::make_unique<RefPadBFloat16Workload>(descriptor, info);
- }
- else if (IsQAsymmS8(info))
- {
- return std::make_unique<RefPadQAsymmS8Workload>(descriptor, info);
- }
- return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
+ return std::make_unique<RefPadWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index fc71eea713..eb2d0c55d9 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1146,6 +1146,11 @@ ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS
ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(PadInt82d, PadInt82dTest)
+ARMNN_AUTO_TEST_CASE(PadInt82dCustomPadding, PadInt82dCustomPaddingTest)
+ARMNN_AUTO_TEST_CASE(PadInt83d, PadInt83dTest)
+ARMNN_AUTO_TEST_CASE(PadInt84d, PadInt84dTest)
+
// Constant
ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8CustomQuantizationScaleAndOffsetTest)
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 1b634145fc..1f8b674c3a 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -4,97 +4,125 @@
//
#include "Pad.hpp"
-#include <backendsCommon/WorkloadData.hpp>
-#include "TensorBufferArrayView.hpp"
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
#include "Encoders.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnnUtils/TensorUtils.hpp>
+
#include <cmath>
#include <cstddef>
#include <functional>
#include <limits>
#include <cassert>
+namespace
+{
+
+void FillOutputWithPadValue(armnn::Encoder<float>& outputData,
+ const float padValue,
+ const unsigned int numOutputElements)
+{
+ for (unsigned int i = 0; i < numOutputElements; ++i)
+ {
+ outputData[i];
+ outputData.Set(padValue);
+ }
+}
+
+} // anonymous namespace
+
namespace armnn
{
-template <typename T>
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_padList,
- const T* inputData,
- T* outData,
- const float padValue)
+ const PadQueueDescriptor& data)
{
+ auto padList = data.m_Parameters.m_PadList;
+ auto padValue = data.m_Parameters.m_PadValue;
+
unsigned int numOutputElements = outputInfo.GetNumElements();
TensorShape outputShape = outputInfo.GetShape();
- TensorShape inputShape = inputInfo.GetShape();
+ TensorShape inputShape = inputInfo.GetShape();
unsigned int numInputDimensions = inputShape.GetNumDimensions();
- #ifndef NDEBUG
+#ifndef NDEBUG
unsigned int numOutputDimensions = outputShape.GetNumDimensions();
assert(numInputDimensions == numOutputDimensions);
- #endif
+#endif
- unsigned int inputBatches = 0;
+ unsigned int inputBatches = 0;
unsigned int inputChannels = 0;
- unsigned int inputHeight = 0;
- unsigned int inputWidth = 0;
+ unsigned int inputHeight = 0;
+ unsigned int inputWidth = 0;
unsigned int outputChannels = 0;
- unsigned int outputHeight = 0;
- unsigned int outputWidth = 0;
+ unsigned int outputHeight = 0;
+ unsigned int outputWidth = 0;
- T convertedPadValue = static_cast<T>(padValue);
+ auto inputData = MakeDecoder<float>(inputInfo, data.m_Inputs[0]->Map());
+ auto outData = MakeEncoder<float>(outputInfo, data.m_Outputs[0]->Map());
- for (unsigned int i = 0; i < numOutputElements; ++i)
+ // Fill the output tensor with Pad value first
+ if (outputInfo.IsQuantized())
{
- outData[i] = convertedPadValue;
+ // For Quantized types Pad Value should not be quantized with scale and offset of the tensor info
+ auto temporaryInfo = TensorInfo(outputInfo.GetShape(), outputInfo.GetDataType(), 1.0f, 0);
+ auto outputData = MakeEncoder<float>(temporaryInfo, data.m_Outputs[0]->Map());
+ FillOutputWithPadValue(*outputData, padValue, numOutputElements);
}
+ else
+ {
+ FillOutputWithPadValue(*outData, padValue, numOutputElements);
+ }
+
+ Decoder<float>& input = *inputData;
+ Encoder<float>& output = *outData;
switch(numInputDimensions) {
case 1:
-
inputWidth = inputShape[0];
-
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[w+std::get<0>(m_padList[0])] = inputData[w];
+ input[w];
+ auto inputValue = input.Get();
+ auto outputIndex = w + std::get<0>(padList[0]);
+ output[outputIndex];
+ output.Set(inputValue);
}
break;
-
case 2 :
-
inputHeight = inputShape[0];
- inputWidth = inputShape[1];
- outputHeight = outputShape[0];
+ inputWidth = inputShape[1];
outputWidth = outputShape[1];
for (unsigned int h = 0; h < inputHeight; h++)
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(h+std::get<0>(m_padList[0]))*outputWidth
- + (w+std::get<0>(m_padList[1]))] = inputData[h * inputWidth + w];
+ input[h * inputWidth + w];
+ auto inputValue = input.Get();
+ auto outputIndex = (h + std::get<0>(padList[0])) * outputWidth + (w + std::get<0>(padList[1]));
+ output[outputIndex];
+ output.Set(inputValue);
}
}
break;
-
case 3 :
-
inputChannels = inputShape[0];
- inputHeight = inputShape[1];
- inputWidth = inputShape[2];
- outputChannels = outputShape[0];
- outputHeight = outputShape[1];
- outputWidth = outputShape[2];
+ inputHeight = inputShape[1];
+ inputWidth = inputShape[2];
+ outputHeight = outputShape[1];
+ outputWidth = outputShape[2];
for (unsigned int c = 0; c < inputChannels; c++)
{
@@ -102,26 +130,26 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(c+std::get<0>(m_padList[0]))*outputHeight*outputWidth
- + (h+std::get<0>(m_padList[1]))*outputWidth
- + (w+std::get<0>(m_padList[2]))] = inputData[c * inputHeight * inputWidth
- + h * inputWidth
- + w];
+ input[c * inputHeight * inputWidth + h * inputWidth + w];
+ auto inputValue = input.Get();
+ auto outputIndex = (c + std::get<0>(padList[0])) * outputHeight * outputWidth
+ + (h + std::get<0>(padList[1])) * outputWidth
+ + (w + std::get<0>(padList[2]));
+ output[outputIndex];
+ output.Set(inputValue);
}
}
}
break;
-
case 4 :
-
- inputBatches = inputShape[0];
- inputChannels = inputShape[1];
- inputHeight = inputShape[2];
- inputWidth = inputShape[3];
+ inputBatches = inputShape[0];
+ inputChannels = inputShape[1];
+ inputHeight = inputShape[2];
+ inputWidth = inputShape[3];
outputChannels = outputShape[1];
- outputHeight = outputShape[2];
- outputWidth = outputShape[3];
+ outputHeight = outputShape[2];
+ outputWidth = outputShape[3];
for (unsigned int b = 0; b < inputBatches; b++)
{
@@ -131,63 +159,27 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(b+std::get<0>(m_padList[0])) * outputChannels * outputHeight * outputWidth
- + (c+std::get<0>(m_padList[1])) * outputHeight * outputWidth
- + (h+std::get<0>(m_padList[2])) * outputWidth
- + (w+std::get<0>(m_padList[3]))] = inputData[b * inputChannels * inputHeight
- * inputWidth
- + c * inputHeight * inputWidth
- + h * inputWidth
- + w];
+ input[b * inputChannels * inputHeight * inputWidth
+ + c * inputHeight * inputWidth
+ + h * inputWidth
+ + w];
+ auto inputValue = input.Get();
+ auto outputIndex = (b + std::get<0>(padList[0]))
+ * outputChannels * outputHeight * outputWidth
+ + (c + std::get<0>(padList[1])) * outputHeight * outputWidth
+ + (h + std::get<0>(padList[2])) * outputWidth
+ + (w + std::get<0>(padList[3]));
+ output[outputIndex];
+ output.Set(inputValue);
}
}
}
}
break;
-
default :
-
break;
}
}
-template void Pad<BFloat16>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const BFloat16* inputData,
- BFloat16* outData,
- const float padValue);
-
-template void Pad<float>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const float* inputData,
- float* outData,
- const float padValue);
-template void Pad<Half>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const Half* inputData,
- Half* outData,
- const float padValue);
-template void Pad<uint8_t>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const uint8_t* inputData,
- uint8_t* outData,
- const float padValue);
-template void Pad<int8_t>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const int8_t* inputData,
- int8_t* outData,
- const float padValue);
-template void Pad<int16_t>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const int16_t* inputData,
- int16_t* outData,
- const float padValue);
-
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/Pad.hpp b/src/backends/reference/workloads/Pad.hpp
index 429718596e..e7be44e88c 100644
--- a/src/backends/reference/workloads/Pad.hpp
+++ b/src/backends/reference/workloads/Pad.hpp
@@ -5,18 +5,16 @@
#pragma once
-#include "armnn/DescriptorsFwd.hpp"
#include "armnn/Tensor.hpp"
-#include <vector>
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
namespace armnn
{
-template <typename T>
+
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_padList,
- const T* inputData,
- T* outData,
- const float padValue);
+ const PadQueueDescriptor& data);
+
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 6f82d5ffdb..af22c31001 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -6,38 +6,22 @@
#include "RefPadWorkload.hpp"
#include "Pad.hpp"
-#include "RefWorkloadUtils.hpp"
-
#include "Profiling.hpp"
-
-#include <ResolveType.hpp>
-
-#include <vector>
+#include "RefWorkloadUtils.hpp"
namespace armnn
{
-template <armnn::DataType DataType>
-void RefPadWorkload<DataType>::Execute() const
+void RefPadWorkload::Execute() const
{
- using T = ResolveType<DataType>;
-
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPadWorkload_Execute");
- const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- const T* inputData = GetInputTensorData<T>(0, m_Data);
- T* outputData = GetOutputTensorData<T>(0, m_Data);
-
- Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData, m_Data.m_Parameters.m_PadValue);
+ armnn::Pad(inputInfo,
+ outputInfo,
+ m_Data);
}
-template class RefPadWorkload<DataType::BFloat16>;
-template class RefPadWorkload<DataType::Float32>;
-template class RefPadWorkload<DataType::Float16>;
-template class RefPadWorkload<DataType::QAsymmS8>;
-template class RefPadWorkload<DataType::QAsymmU8>;
-template class RefPadWorkload<DataType::QSymmS16>;
-
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index 74dcab1967..0b8379a60f 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -8,33 +8,14 @@
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <armnn/TypesUtils.hpp>
-
namespace armnn
{
-template <armnn::DataType DataType>
-class RefPadWorkload : public TypedWorkload<PadQueueDescriptor, DataType>
+class RefPadWorkload : public BaseWorkload<PadQueueDescriptor>
{
public:
-
- static const std::string& GetName()
- {
- static const std::string name = std::string("RefPad") + GetDataTypeName(DataType) + "Workload";
- return name;
- }
-
- using TypedWorkload<PadQueueDescriptor, DataType>::m_Data;
- using TypedWorkload<PadQueueDescriptor, DataType>::TypedWorkload;
-
- void Execute() const override;
+ using BaseWorkload<PadQueueDescriptor>::BaseWorkload;
+ virtual void Execute() const override;
};
-using RefPadBFloat16Workload = RefPadWorkload<DataType::BFloat16>;
-using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
-using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
-using RefPadQAsymmS8Workload = RefPadWorkload<DataType::QAsymmS8>;
-using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
-using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
-
} //namespace armnn