aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/Pad.cpp
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2020-06-04 10:32:18 +0100
committerSadik Armagan <sadik.armagan@arm.com>2020-06-05 09:48:47 +0000
commit041b3c0d416d94b1aa4efa41960808b7e5e61f25 (patch)
tree029b7f3b072d94df8cc92e92ab0a0300130c7902 /src/backends/reference/workloads/Pad.cpp
parentfa0d838fd7be90b6e1d53aa62da14ccc0cf0eacc (diff)
downloadarmnn-041b3c0d416d94b1aa4efa41960808b7e5e61f25.tar.gz
IVGCVSW-4904 Refactor CpuRef PAD Workload
* Refactored templated workload creation * Added int8_t unit tests Signed-off-by: Sadik Armagan <sadik.armagan@arm.com> Change-Id: I018b7f9f20496b5f9d7445901fe0d3dd04199cd0
Diffstat (limited to 'src/backends/reference/workloads/Pad.cpp')
-rw-r--r--src/backends/reference/workloads/Pad.cpp184
1 file changed, 88 insertions, 96 deletions
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 1b634145fc..1f8b674c3a 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -4,97 +4,125 @@
//
#include "Pad.hpp"
-#include <backendsCommon/WorkloadData.hpp>
-#include "TensorBufferArrayView.hpp"
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
#include "Encoders.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnnUtils/TensorUtils.hpp>
+
#include <cmath>
#include <cstddef>
#include <functional>
#include <limits>
#include <cassert>
+namespace
+{
+
+void FillOutputWithPadValue(armnn::Encoder<float>& outputData,
+ const float padValue,
+ const unsigned int numOutputElements)
+{
+ for (unsigned int i = 0; i < numOutputElements; ++i)
+ {
+ outputData[i];
+ outputData.Set(padValue);
+ }
+}
+
+} // anonymous namespace
+
namespace armnn
{
-template <typename T>
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_padList,
- const T* inputData,
- T* outData,
- const float padValue)
+ const PadQueueDescriptor& data)
{
+ auto padList = data.m_Parameters.m_PadList;
+ auto padValue = data.m_Parameters.m_PadValue;
+
unsigned int numOutputElements = outputInfo.GetNumElements();
TensorShape outputShape = outputInfo.GetShape();
- TensorShape inputShape = inputInfo.GetShape();
+ TensorShape inputShape = inputInfo.GetShape();
unsigned int numInputDimensions = inputShape.GetNumDimensions();
- #ifndef NDEBUG
+#ifndef NDEBUG
unsigned int numOutputDimensions = outputShape.GetNumDimensions();
assert(numInputDimensions == numOutputDimensions);
- #endif
+#endif
- unsigned int inputBatches = 0;
+ unsigned int inputBatches = 0;
unsigned int inputChannels = 0;
- unsigned int inputHeight = 0;
- unsigned int inputWidth = 0;
+ unsigned int inputHeight = 0;
+ unsigned int inputWidth = 0;
unsigned int outputChannels = 0;
- unsigned int outputHeight = 0;
- unsigned int outputWidth = 0;
+ unsigned int outputHeight = 0;
+ unsigned int outputWidth = 0;
- T convertedPadValue = static_cast<T>(padValue);
+ auto inputData = MakeDecoder<float>(inputInfo, data.m_Inputs[0]->Map());
+ auto outData = MakeEncoder<float>(outputInfo, data.m_Outputs[0]->Map());
- for (unsigned int i = 0; i < numOutputElements; ++i)
+ // Fill the output tensor with Pad value first
+ if (outputInfo.IsQuantized())
{
- outData[i] = convertedPadValue;
+ // For Quantized types Pad Value should not be quantized with scale and offset of the tensor info
+ auto temporaryInfo = TensorInfo(outputInfo.GetShape(), outputInfo.GetDataType(), 1.0f, 0);
+ auto outputData = MakeEncoder<float>(temporaryInfo, data.m_Outputs[0]->Map());
+ FillOutputWithPadValue(*outputData, padValue, numOutputElements);
}
+ else
+ {
+ FillOutputWithPadValue(*outData, padValue, numOutputElements);
+ }
+
+ Decoder<float>& input = *inputData;
+ Encoder<float>& output = *outData;
switch(numInputDimensions) {
case 1:
-
inputWidth = inputShape[0];
-
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[w+std::get<0>(m_padList[0])] = inputData[w];
+ input[w];
+ auto inputValue = input.Get();
+ auto outputIndex = w + std::get<0>(padList[0]);
+ output[outputIndex];
+ output.Set(inputValue);
}
break;
-
case 2 :
-
inputHeight = inputShape[0];
- inputWidth = inputShape[1];
- outputHeight = outputShape[0];
+ inputWidth = inputShape[1];
outputWidth = outputShape[1];
for (unsigned int h = 0; h < inputHeight; h++)
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(h+std::get<0>(m_padList[0]))*outputWidth
- + (w+std::get<0>(m_padList[1]))] = inputData[h * inputWidth + w];
+ input[h * inputWidth + w];
+ auto inputValue = input.Get();
+ auto outputIndex = (h + std::get<0>(padList[0])) * outputWidth + (w + std::get<0>(padList[1]));
+ output[outputIndex];
+ output.Set(inputValue);
}
}
break;
-
case 3 :
-
inputChannels = inputShape[0];
- inputHeight = inputShape[1];
- inputWidth = inputShape[2];
- outputChannels = outputShape[0];
- outputHeight = outputShape[1];
- outputWidth = outputShape[2];
+ inputHeight = inputShape[1];
+ inputWidth = inputShape[2];
+ outputHeight = outputShape[1];
+ outputWidth = outputShape[2];
for (unsigned int c = 0; c < inputChannels; c++)
{
@@ -102,26 +130,26 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(c+std::get<0>(m_padList[0]))*outputHeight*outputWidth
- + (h+std::get<0>(m_padList[1]))*outputWidth
- + (w+std::get<0>(m_padList[2]))] = inputData[c * inputHeight * inputWidth
- + h * inputWidth
- + w];
+ input[c * inputHeight * inputWidth + h * inputWidth + w];
+ auto inputValue = input.Get();
+ auto outputIndex = (c + std::get<0>(padList[0])) * outputHeight * outputWidth
+ + (h + std::get<0>(padList[1])) * outputWidth
+ + (w + std::get<0>(padList[2]));
+ output[outputIndex];
+ output.Set(inputValue);
}
}
}
break;
-
case 4 :
-
- inputBatches = inputShape[0];
- inputChannels = inputShape[1];
- inputHeight = inputShape[2];
- inputWidth = inputShape[3];
+ inputBatches = inputShape[0];
+ inputChannels = inputShape[1];
+ inputHeight = inputShape[2];
+ inputWidth = inputShape[3];
outputChannels = outputShape[1];
- outputHeight = outputShape[2];
- outputWidth = outputShape[3];
+ outputHeight = outputShape[2];
+ outputWidth = outputShape[3];
for (unsigned int b = 0; b < inputBatches; b++)
{
@@ -131,63 +159,27 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(b+std::get<0>(m_padList[0])) * outputChannels * outputHeight * outputWidth
- + (c+std::get<0>(m_padList[1])) * outputHeight * outputWidth
- + (h+std::get<0>(m_padList[2])) * outputWidth
- + (w+std::get<0>(m_padList[3]))] = inputData[b * inputChannels * inputHeight
- * inputWidth
- + c * inputHeight * inputWidth
- + h * inputWidth
- + w];
+ input[b * inputChannels * inputHeight * inputWidth
+ + c * inputHeight * inputWidth
+ + h * inputWidth
+ + w];
+ auto inputValue = input.Get();
+ auto outputIndex = (b + std::get<0>(padList[0]))
+ * outputChannels * outputHeight * outputWidth
+ + (c + std::get<0>(padList[1])) * outputHeight * outputWidth
+ + (h + std::get<0>(padList[2])) * outputWidth
+ + (w + std::get<0>(padList[3]));
+ output[outputIndex];
+ output.Set(inputValue);
}
}
}
}
break;
-
default :
-
break;
}
}
-template void Pad<BFloat16>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const BFloat16* inputData,
- BFloat16* outData,
- const float padValue);
-
-template void Pad<float>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const float* inputData,
- float* outData,
- const float padValue);
-template void Pad<Half>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const Half* inputData,
- Half* outData,
- const float padValue);
-template void Pad<uint8_t>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const uint8_t* inputData,
- uint8_t* outData,
- const float padValue);
-template void Pad<int8_t>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const int8_t* inputData,
- int8_t* outData,
- const float padValue);
-template void Pad<int16_t>(const TensorInfo& inputInfo,
- const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
- const int16_t* inputData,
- int16_t* outData,
- const float padValue);
-
} //namespace armnn \ No newline at end of file