about summary refs log tree commit diff
diff options
context:
space:
mode:
authorDavid Monahan <david.monahan@arm.com>2019-06-19 11:47:21 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2019-06-24 23:09:02 +0000
commit34757810f8b734f5f59485a542b56934ad4cc5f0 (patch)
treec9859e1eb90fdf1e3ba4fbeadb3952cac3477277
parent84da38b0f11ca3db0a439e510514be780f3933ff (diff)
downloadarmnn-34757810f8b734f5f59485a542b56934ad4cc5f0.tar.gz
IVGCVSW-3235 Add scalar to use as padding value in Reference Pad
Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: If050f318fcb7626bbfae1b8737a1d232a4a5a915
-rw-r--r--include/armnn/Descriptors.hpp9
-rw-r--r--src/armnnDeserializer/Deserializer.cpp3
-rw-r--r--src/armnnSerializer/ArmnnSchema.fbs1
-rw-r--r--src/armnnSerializer/Serializer.cpp3
-rw-r--r--src/armnnSerializer/test/SerializerTests.cpp1
-rw-r--r--src/backends/backendsCommon/test/LayerTests.cpp70
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp20
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp2
-rw-r--r--src/backends/reference/workloads/Pad.cpp51
-rw-r--r--src/backends/reference/workloads/Pad.hpp5
-rw-r--r--src/backends/reference/workloads/RefPadWorkload.cpp3
11 files changed, 126 insertions, 42 deletions
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 2fda8c1d06..0655d42fbd 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -606,11 +606,11 @@ struct MeanDescriptor
/// A PadDescriptor for the PadLayer.
struct PadDescriptor
{
- PadDescriptor()
+ PadDescriptor() : m_padValue(0)
{}
- PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList)
- : m_PadList(padList)
+ PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
+ : m_PadList(padList), m_padValue(padValue)
{}
/// @brief Specifies the padding for input dimension.
@@ -618,6 +618,9 @@ struct PadDescriptor
/// Second is the number of values to add after the tensor in the dimension.
/// The number of pairs should match the number of dimensions in the input tensor.
std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
+
+ /// Optional value to use for padding, defaults to 0
+ float m_padValue;
};
/// A StridedSliceDescriptor for the StridedSliceLayer.
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 452ef8e5ae..5372606689 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -1390,6 +1390,7 @@ void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex)
auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_PadLayer()->descriptor();
auto flatBufferPadList = flatBufferDescriptor->padList();
+ float padValue = flatBufferDescriptor->padValue();
if (flatBufferPadList->Length() % 2 != 0)
{
@@ -1404,7 +1405,7 @@ void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex)
padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1));
}
- armnn::PadDescriptor descriptor(padList);
+ armnn::PadDescriptor descriptor(padList, padValue);
auto layerName = GetLayerName(graph, layerIndex);
IConnectableLayer* layer = m_Network->AddPadLayer(descriptor, layerName.c_str());
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 83275ca248..7969d10598 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -409,6 +409,7 @@ table PadLayer {
table PadDescriptor {
padList:[uint];
+ padValue:float = 0;
}
table RsqrtLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 126247bb8c..208262b699 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -577,7 +577,8 @@ void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer,
}
auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
- m_flatBufferBuilder.CreateVector(padList));
+ m_flatBufferBuilder.CreateVector(padList),
+ padDescriptor.m_padValue);
auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
flatBufferBaseLayer,
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 8b1594e141..294adec12e 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1712,6 +1712,7 @@ private:
void VerifyDescriptor(const armnn::PadDescriptor& descriptor)
{
BOOST_TEST(descriptor.m_PadList == m_Descriptor.m_PadList);
+ BOOST_TEST(descriptor.m_padValue == m_Descriptor.m_padValue);
}
armnn::PadDescriptor m_Descriptor;
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index d9ae546739..c9a5731190 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5869,13 +5869,14 @@ LayerTestResult<T, 2> Pad2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
- int32_t qOffset)
+ int32_t qOffset,
+ const float customPaddingValue = 0)
{
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
- const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
std::vector<T> inputValues(
QuantizedVector<T>(qScale, qOffset,
@@ -5886,17 +5887,38 @@ LayerTestResult<T, 2> Pad2dTestCommon(
3, 2, 4
}));
- std::vector<T> expectedOutputValues(
- QuantizedVector<T>(qScale, qOffset,
+ const T padValue = ConvertToDataType<T>(customPaddingValue, inputTensorInfo);
+
+ std::vector<T> expectedOutputValues;
+ if (padValue == 0)
{
- 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 4, 8, 6, 0, 0,
- 0, 0, 7, 4, 4, 0, 0,
- 0, 0, 3, 2, 4, 0, 0,
- 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0
- }));
+ expectedOutputValues = (
+ QuantizedVector<T>(qScale, qOffset,
+ {
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 4, 8, 6, 0, 0,
+ 0, 0, 7, 4, 4, 0, 0,
+ 0, 0, 3, 2, 4, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0
+ }));
+ }
+ else
+ {
+ expectedOutputValues = (
+ QuantizedVector<T>(qScale, qOffset,
+ {
+ 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 4, 8, 6, 1, 1,
+ 1, 1, 7, 4, 4, 1, 1,
+ 1, 1, 3, 2, 4, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1
+ }));
+ }
+
auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
@@ -5943,8 +5965,8 @@ LayerTestResult<T, 3> Pad3dTestCommon(
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
- const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
@@ -6028,8 +6050,8 @@ LayerTestResult<T, 4> Pad4dTestCommon(
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
- const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
@@ -6263,6 +6285,13 @@ LayerTestResult<uint8_t, 2> PadUint82dTest(
return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
+LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+}
+
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -6284,6 +6313,13 @@ LayerTestResult<float, 2> PadFloat322dTest(
return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
+
LayerTestResult<float, 3> PadFloat323dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 25ccfa09f0..be16819990 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1382,6 +1382,10 @@ LayerTestResult<uint8_t, 2> PadUint82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -1394,6 +1398,10 @@ LayerTestResult<float, 2> PadFloat322dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 3> PadFloat323dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2022,6 +2030,18 @@ std::vector<T> ConvertToDataType(const std::vector<float>& input,
return output;
}
+// Utility method to convert a single value to the correct type
+template <typename T>
+T ConvertToDataType(const float& value,
+ const armnn::TensorInfo& tensorInfo)
+{
+ std::vector<T> output(1);
+ std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+ armnn::Encoder<float>& rEncoder = *pEncoder;
+ rEncoder.Set(value);
+ return output[0];
+}
+
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Rsqrt2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cb9ee4b5a0..9cb8d13adc 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -645,10 +645,12 @@ ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefault
// Pad
ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest)
ARMNN_AUTO_TEST_CASE(PadFloat324d, PadFloat324dTest)
ARMNN_AUTO_TEST_CASE(PadUint82d, PadUint82dTest)
+ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 7a928a1336..1e58124627 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -5,8 +5,10 @@
#include "Pad.hpp"
#include "backendsCommon/WorkloadData.hpp"
-#include <boost/numeric/conversion/cast.hpp>
#include "TensorBufferArrayView.hpp"
+#include "Encoders.hpp"
+
+#include <boost/numeric/conversion/cast.hpp>
#include <cmath>
#include <cstddef>
#include <functional>
@@ -15,12 +17,25 @@
namespace armnn
{
+
+template <typename T>
+T ConvertToDataType(const float& value,
+ const armnn::TensorInfo& tensorInfo)
+{
+ std::vector<T> output(1);
+ std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+ armnn::Encoder<float>& rEncoder = *pEncoder;
+ rEncoder.Set(value);
+ return output[0];
+}
+
template <typename T>
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ std::vector<std::pair<unsigned int, unsigned int>> m_padList,
const T* inputData,
- T* outData)
+ T* outData,
+ const float padValue)
{
unsigned int numOutputElements = outputInfo.GetNumElements();
@@ -45,9 +60,11 @@ void Pad(const TensorInfo& inputInfo,
unsigned int outputHeight = 0;
unsigned int outputWidth = 0;
+ T convertedPadValue = ConvertToDataType<T>(padValue, inputInfo);
+
for (unsigned int i = 0; i < numOutputElements; ++i)
{
- outData[i] = 0;
+ outData[i] = convertedPadValue;
}
switch(numInputDimensions) {
@@ -58,7 +75,7 @@ void Pad(const TensorInfo& inputInfo,
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[w+std::get<0>(m_PadList[0])] = inputData[w];
+ outData[w+std::get<0>(m_padList[0])] = inputData[w];
}
break;
@@ -74,8 +91,8 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(h+std::get<0>(m_PadList[0]))*outputWidth
- + (w+std::get<0>(m_PadList[1]))] = inputData[h * inputWidth + w];
+ outData[(h+std::get<0>(m_padList[0]))*outputWidth
+ + (w+std::get<0>(m_padList[1]))] = inputData[h * inputWidth + w];
}
}
@@ -96,9 +113,9 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(c+std::get<0>(m_PadList[0]))*outputHeight*outputWidth
- + (h+std::get<0>(m_PadList[1]))*outputWidth
- + (w+std::get<0>(m_PadList[2]))] = inputData[c * inputHeight * inputWidth
+ outData[(c+std::get<0>(m_padList[0]))*outputHeight*outputWidth
+ + (h+std::get<0>(m_padList[1]))*outputWidth
+ + (w+std::get<0>(m_padList[2]))] = inputData[c * inputHeight * inputWidth
+ h * inputWidth
+ w];
}
@@ -125,10 +142,10 @@ void Pad(const TensorInfo& inputInfo,
{
for (unsigned int w = 0; w < inputWidth ; w++)
{
- outData[(b+std::get<0>(m_PadList[0])) * outputChannels * outputHeight * outputWidth
- + (c+std::get<0>(m_PadList[1])) * outputHeight * outputWidth
- + (h+std::get<0>(m_PadList[2])) * outputWidth
- + (w+std::get<0>(m_PadList[3]))] = inputData[b * inputChannels * inputHeight
+ outData[(b+std::get<0>(m_padList[0])) * outputChannels * outputHeight * outputWidth
+ + (c+std::get<0>(m_padList[1])) * outputHeight * outputWidth
+ + (h+std::get<0>(m_padList[2])) * outputWidth
+ + (w+std::get<0>(m_padList[3]))] = inputData[b * inputChannels * inputHeight
* inputWidth
+ c * inputHeight * inputWidth
+ h * inputWidth
@@ -150,11 +167,13 @@ template void Pad<float>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
const float* inputData,
- float* outData);
+ float* outData,
+ const float padValue);
template void Pad<uint8_t>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
const uint8_t* inputData,
- uint8_t* outData);
+ uint8_t* outData,
+ const float padValue);
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/Pad.hpp b/src/backends/reference/workloads/Pad.hpp
index 42318d6fcf..429718596e 100644
--- a/src/backends/reference/workloads/Pad.hpp
+++ b/src/backends/reference/workloads/Pad.hpp
@@ -15,7 +15,8 @@ namespace armnn
template <typename T>
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ std::vector<std::pair<unsigned int, unsigned int>> m_padList,
const T* inputData,
- T* outData);
+ T* outData,
+ const float padValue);
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 16032e7c77..8cb9d883dc 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -30,8 +30,7 @@ void RefPadWorkload<DataType>::Execute() const
const T* inputData = GetInputTensorData<T>(0, m_Data);
T* outputData = GetOutputTensorData<T>(0, m_Data);
-
- Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData);
+ Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData, m_Data.m_Parameters.m_padValue);
}
template class RefPadWorkload<DataType::Float32>;