author    Teresa Charlin <teresa.charlinreyes@arm.com>  2019-07-01 13:51:07 +0100
committer Teresa Charlin <teresa.charlinreyes@arm.com>  2019-07-01 14:48:57 +0100
commit    970f43b078eba91c66fb64eadbc9803661ffcda8 (patch)
tree      99613d07e62130caff834eadb35f8456b04c63a4
parent    6fb339a7d202a9c64d8c7843d630fe8ab7be9f33 (diff)
download  armnn-970f43b078eba91c66fb64eadbc9803661ffcda8.tar.gz

IVGCVSW-3365 Add reference workload support for ResizeLayer

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Id551690065dca0686ce597d1f0c14fd73163481e
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                    |  89
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp                 | 518
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                      |  35
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp                      |   5
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                   |  10
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp                   |   3
-rw-r--r--  src/backends/reference/backend.mk                               |   3
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                   |  94
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt                 |   6
-rw-r--r--  src/backends/reference/workloads/RefResizeBilinearWorkload.cpp  |   4
-rw-r--r--  src/backends/reference/workloads/RefResizeWorkload.cpp          |  35
-rw-r--r--  src/backends/reference/workloads/RefResizeWorkload.hpp          |  21
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp               |   3
-rw-r--r--  src/backends/reference/workloads/Resize.cpp (renamed from src/backends/reference/workloads/ResizeBilinear.cpp) | 66
-rw-r--r--  src/backends/reference/workloads/Resize.hpp                     |  23
-rw-r--r--  src/backends/reference/workloads/ResizeBilinear.hpp             |  22
16 files changed, 858 insertions(+), 79 deletions(-)
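For orientation before the per-file diffs: the new reference workload is driven by the front-end Resize layer through a ResizeDescriptor. A minimal sketch of how a caller might request the nearest-neighbor method this commit implements, assuming the 19.x definition of the descriptor (fields m_TargetWidth, m_TargetHeight, m_Method, m_DataLayout):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Types.hpp>

    // Sketch only, not part of this patch: requesting the nearest-neighbor
    // method that this commit wires up on the CpuRef backend.
    armnn::ResizeDescriptor MakeNearestNeighborDescriptor(unsigned int targetWidth,
                                                          unsigned int targetHeight)
    {
        armnn::ResizeDescriptor descriptor;
        descriptor.m_TargetWidth  = targetWidth;
        descriptor.m_TargetHeight = targetHeight;
        descriptor.m_Method       = armnn::ResizeMethod::NearestNeighbor;
        descriptor.m_DataLayout   = armnn::DataLayout::NHWC;
        return descriptor;
    }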
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 1d0be5d1ff..e7915dd40b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -915,12 +915,12 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
std::vector<DataType> supportedTypes =
- {
- DataType::Float16,
- DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
- };
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
supportedTypes,
@@ -931,29 +931,72 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
"ResizeBilinearQueueDescriptor");
// Resizes bilinear only changes width and height: batch and channel count must match.
+ const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
+ const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
+ if (inputBatchSize != outputBatchSize)
{
- const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
- const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
- if (inputBatchSize != outputBatchSize)
- {
- throw InvalidArgumentException(
- boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
- "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
- }
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
+ "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
}
+ DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+ const unsigned int inputChannelCount =
+ workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
+ const unsigned int outputChannelCount =
+ workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
+ if (inputChannelCount != outputChannelCount)
{
- DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
- const unsigned int inputChannelCount =
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
+ "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
+ }
+}
+
+void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateNumInputs(workloadInfo, "ResizeQueueDescriptor", 1);
+ ValidateNumOutputs(workloadInfo, "ResizeQueueDescriptor", 1);
+
+ ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeQueueDescriptor", 4, "input");
+ ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeQueueDescriptor", 4, "output");
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ supportedTypes,
+ "ResizeQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+ {workloadInfo.m_InputTensorInfos[0].GetDataType()},
+ "ResizeQueueDescriptor");
+
+ // Resize only changes width and height: batch and channel count must match.
+ const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
+ const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
+ if (inputBatchSize != outputBatchSize)
+ {
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeQueueDescriptor: Input batch size (%1%) "
+ "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
+ }
+
+ DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+ const unsigned int inputChannelCount =
workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
- const unsigned int outputChannelCount =
+ const unsigned int outputChannelCount =
workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
- if (inputChannelCount != outputChannelCount)
- {
- throw InvalidArgumentException(
- boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
- "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
- }
+ if (inputChannelCount != outputChannelCount)
+ {
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeQueueDescriptor: Input channel count (%1%) "
+ "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
}
}
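Both Validate functions above enforce the same invariant: a resize may change only the spatial dimensions, so batch and channel extents must agree between input and output. A simplified stand-alone sketch of that check (std::to_string in place of boost::format, std::invalid_argument in place of armnn::InvalidArgumentException):

    #include <stdexcept>
    #include <string>

    // channelsIndex is what DataLayoutIndexed::GetChannelsIndex() returns:
    // 1 for NCHW, 3 for NHWC. shape[0] is always the batch dimension.
    void ValidateResizeShapes(const unsigned int (&input)[4],
                              const unsigned int (&output)[4],
                              unsigned int channelsIndex)
    {
        if (input[0] != output[0])
        {
            throw std::invalid_argument("Input batch size (" + std::to_string(input[0]) +
                                        ") does not match output batch size (" +
                                        std::to_string(output[0]) + ")");
        }
        if (input[channelsIndex] != output[channelsIndex])
        {
            throw std::invalid_argument("Input channel count (" + std::to_string(input[channelsIndex]) +
                                        ") does not match output channel count (" +
                                        std::to_string(output[channelsIndex]) + ")");
        }
    }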
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index b225e4d655..405ccff35b 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -873,7 +873,7 @@ LayerTestResult<int16_t, 4> TanhInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-/// Tests that the output should be identical to the input when the output dimensions match the input ones.
+// Tests that the output should be identical to the input when the output dimensions match the input ones.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearNopTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -909,6 +909,42 @@ LayerTestResult<T, 4> ResizeBilinearMagTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
+// Tests that the output should be identical to the input when the output dimensions match the input ones.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+// Tests the behaviour of the resize NearestNeighbor operation when rescaling a 2x2 image into a 1x1 image.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+// Tests the resize NearestNeighbor for minification of a square input matrix (also: input dimensions are a
+// multiple of output dimensions).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+// Tests the resize NearestNeighbor for minification (output dimensions smaller than input dimensions).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+// Tests the resize NearestNeighbor for magnification (output dimensions bigger than input dimensions).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Rsqrt2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
@@ -2927,6 +2963,486 @@ LayerTestResult<T, 4> ResizeBilinearMagTest(
return result;
}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.5f);
+ inputTensorInfo.SetQuantizationOffset(-3);
+ outputTensorInfo.SetQuantizationScale(1.5f);
+ outputTensorInfo.SetQuantizationOffset(-3);
+ }
+
+ std::vector<float> inputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 1, 2, 3, 4,
+ 2, 3, 4, 5,
+ 3, 4, 5, 6,
+ 4, 5, 6, 7
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
+
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f
+ };
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+ inputData = tmp;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputData));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = input;
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(0.1567f);
+ inputTensorInfo.SetQuantizationOffset(1);
+ outputTensorInfo.SetQuantizationScale(0.1567f);
+ outputTensorInfo.SetQuantizationOffset(1);
+ }
+
+ std::vector<float> inputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 1, 255,
+ 200, 250
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f, 255.0f,
+ 200.0f, 250.0f,
+
+ 250.0f, 200.0f,
+ 250.0f, 1.0f
+ };
+
+ // The 'resize' operation projects the top-left corner of output texels into the input image,
+ // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+ // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+ // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+ // which we would expect if projecting the centre).
+
+ std::vector<float> outputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 1
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f,
+
+ 250.0f
+ };
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+ inputData = tmp;
+
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputData));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset(),
+ outputData));
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(3.141592f);
+ inputTensorInfo.SetQuantizationOffset(3);
+ outputTensorInfo.SetQuantizationScale(3.141592f);
+ outputTensorInfo.SetQuantizationOffset(3);
+ }
+
+ std::vector<float> inputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 1, 2, 3, 4,
+ 2, 3, 4, 5,
+ 3, 4, 5, 6,
+ 4, 5, 6, 7
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
+
+ 7.0f, 6.0f, 5.0f, 4.0f,
+ 6.0f, 5.0f, 4.0f, 3.0f,
+ 5.0f, 4.0f, 3.0f, 2.0f,
+ 4.0f, 3.0f, 2.0f, 1.0f
+ };
+
+ std::vector<float> outputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 1, 3,
+ 3, 5
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f, 3.0f,
+ 3.0f, 5.0f,
+
+ 7.0f, 5.0f,
+ 5.0f, 3.0f
+ };
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+ inputData = tmp;
+
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputData));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset(),
+ outputData));
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.5f);
+ inputTensorInfo.SetQuantizationOffset(-1);
+ outputTensorInfo.SetQuantizationScale(1.5f);
+ outputTensorInfo.SetQuantizationOffset(-1);
+ }
+
+ std::vector<float> inputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 3.0f, 4.5f, 6.0f, // 1, 2, 3, : Expected quantised values
+ 9.0f, 13.5f, 21.0f // 5, 8, 13
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
+ 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
+ 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
+
+ 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+ 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
+ 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
+ };
+
+ std::vector<float> outputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 3.0f, 4.5f // 1, 3
+ }
+ : std::initializer_list<float>
+ {
+ 1.f, 2.f, 5.f,
+ 13.f, 21.f, 55.f,
+
+ 987.f, 610.f, 233.f,
+ 89.f, 55.f, 21.f
+ };
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+ inputData = tmp;
+
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputData));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset(),
+ outputData));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+ ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
+ : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(0.010765f);
+ inputTensorInfo.SetQuantizationOffset(7);
+ outputTensorInfo.SetQuantizationScale(0.010132f);
+ outputTensorInfo.SetQuantizationOffset(-18);
+ }
+
+ std::vector<float> inputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 0.183005f, 2.379065f, // 24, 228, : Expected quantised values
+ 1.05497f, 1.302565f, // 105, 128,
+ 2.400595f, 0.68896f // 230, 71
+ }
+ : std::initializer_list<float>
+ {
+ 1.0f, 2.0f,
+ 13.0f, 21.0f,
+ 144.0f, 233.0f,
+
+ 233.0f, 144.0f,
+ 21.0f, 13.0f,
+ 2.0f, 1.0f
+ };
+ std::vector<float> outputData = armnn::IsQuantizedType<T>()
+ ? std::initializer_list<float>
+ {
+ 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
+ 1.05497f, 1.05497f, 1.05497f, 1.302565f, 1.302565f,
+ 2.400595f, 2.400595f, 2.400595f, 0.68896f, 0.68896f
+ }
+ : std::initializer_list<float>
+ {
+ 1.f, 1.f, 1.f, 2.f, 2.f,
+ 13.f, 13.f, 13.f, 21.f, 21.f,
+ 144.f, 144.f, 144.f, 233.f, 233.f,
+
+ 233.f, 233.f, 233.f, 144.f, 144.f,
+ 21.f, 21.f, 21.f, 13.f, 13.f,
+ 2.f, 2.f, 2.f, 1.f, 1.f
+ };
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+ inputData = tmp;
+
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputData));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset(),
+ outputData));
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ return result;
+}
+
+
template<armnn::DataType ArmnnType, typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 429993a55f..b563badca5 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1239,11 +1239,11 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
{
bool supported = true;
std::array<DataType,3> supportedTypes =
- {
- DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
- };
+ {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
"Reference ResizeBilinear: input type not supported");
@@ -1257,6 +1257,31 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ bool supported = true;
+ std::array<DataType,3> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference Resize: input type not supported");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference Resize: output type not supported");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "Reference Resize: input and output types not matching");
+
+ return supported;
+}
+
bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
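IsResizeSupported reuses the backend's rule-accumulation idiom: each CheckSupportRule call ANDs one predicate into supported and, on failure, records the reason string. A stripped-down sketch of that idiom, where Rule is a hypothetical stand-in for TypeAnyOf / TypesAreEqual:

    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    using Rule = std::pair<std::function<bool()>, const char*>;  // predicate + failure reason

    // All rules are evaluated rather than short-circuited, so the caller
    // receives every reason a configuration is rejected, as in the code above.
    bool IsSupported(const std::vector<Rule>& rules, std::string& reasonIfUnsupported)
    {
        bool supported = true;
        for (const Rule& rule : rules)
        {
            if (!rule.first())
            {
                reasonIfUnsupported += rule.second;
                reasonIfUnsupported += '\n';
                supported = false;
            }
        }
        return supported;
    }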
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 9c397fe66b..22b007b378 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -222,6 +222,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index d906f93a38..95a44193de 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -239,6 +239,16 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCop
return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ if (IsFloat16(info))
+ {
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ }
+ return std::make_unique<RefResizeWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
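One subtlety in CreateResize above: although ResizeQueueDescriptor::Validate lists Float16 as a supported type, the reference factory routes Float16 to NullWorkload, so this patch adds no Float16 resize kernel on CpuRef. A schematic of that dispatch; the types here are stand-ins, not the real armnn classes:

    #include <memory>

    enum class DataType { Float16, Float32, QuantisedAsymm8, QuantisedSymm16 };

    struct IWorkload         { virtual ~IWorkload() = default; };
    struct NullWorkload      : IWorkload {};  // stand-in: no usable workload for this type
    struct RefResizeWorkload : IWorkload {};

    std::unique_ptr<IWorkload> CreateResize(DataType firstInputType)
    {
        if (firstInputType == DataType::Float16)
        {
            return std::make_unique<NullWorkload>();
        }
        return std::make_unique<RefResizeWorkload>();
    }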
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 44cb079ea7..1a40259eb9 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -106,6 +106,9 @@ public:
std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 6fb17b563f..7995654663 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -54,6 +54,7 @@ BACKEND_SOURCES := \
workloads/RefQuantizeWorkload.cpp \
workloads/RefReshapeWorkload.cpp \
workloads/RefResizeBilinearWorkload.cpp \
+ workloads/RefResizeWorkload.cpp \
workloads/RefRsqrtWorkload.cpp \
workloads/RefSoftmaxWorkload.cpp \
workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -61,7 +62,7 @@ BACKEND_SOURCES := \
workloads/RefStridedSliceWorkload.cpp \
workloads/RefSplitterWorkload.cpp \
workloads/RefTransposeConvolution2dWorkload.cpp \
- workloads/ResizeBilinear.cpp \
+ workloads/Resize.cpp \
workloads/Rsqrt.cpp \
workloads/SpaceToBatchNd.cpp \
workloads/SpaceToDepth.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 80d53190b6..7797f17a22 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -607,6 +607,100 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc,
ResizeBilinearNopTest<armnn::DataType::QuantisedSymm16>,
armnn::DataLayout::NHWC)
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NCHW)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>,
+ armnn::DataLayout::NHWC)
+
// Fake Quantization
ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 9be245b1a7..3c0af01c00 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -96,6 +96,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefReshapeWorkload.hpp
RefResizeBilinearWorkload.cpp
RefResizeBilinearWorkload.hpp
+ RefResizeWorkload.cpp
+ RefResizeWorkload.hpp
RefRsqrtWorkload.cpp
RefRsqrtWorkload.hpp
RefSoftmaxWorkload.cpp
@@ -112,8 +114,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefTransposeConvolution2dWorkload.hpp
RefWorkloads.hpp
RefWorkloadUtils.hpp
- ResizeBilinear.cpp
- ResizeBilinear.hpp
+ Resize.cpp
+ Resize.hpp
Rsqrt.cpp
Rsqrt.hpp
Softmax.cpp
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
index 03fcec25fe..fc27c0f93d 100644
--- a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
+++ b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
@@ -6,7 +6,7 @@
#include "RefResizeBilinearWorkload.hpp"
#include "RefWorkloadUtils.hpp"
-#include "ResizeBilinear.hpp"
+#include "Resize.hpp"
#include "BaseIterator.hpp"
#include "Profiling.hpp"
@@ -29,7 +29,7 @@ void RefResizeBilinearWorkload::Execute() const
std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
Encoder<float> &encoder = *encoderPtr;
- ResizeBilinear(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout);
+ Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, armnn::ResizeMethod::Bilinear);
}
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeWorkload.cpp b/src/backends/reference/workloads/RefResizeWorkload.cpp
new file mode 100644
index 0000000000..26225f8823
--- /dev/null
+++ b/src/backends/reference/workloads/RefResizeWorkload.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefResizeWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Resize.hpp"
+#include "BaseIterator.hpp"
+#include "Profiling.hpp"
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+void RefResizeWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeWorkload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+ Decoder<float> &decoder = *decoderPtr;
+ std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+ Encoder<float> &encoder = *encoderPtr;
+
+ Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_Method);
+}
+
+} //namespace armnn
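RefResizeWorkload never touches raw tensor memory directly: MakeDecoder/MakeEncoder wrap the mapped buffers in per-element float views, dequantising QuantisedAsymm8/QuantisedSymm16 on read and requantising on write. A bare-bones, hypothetical miniature of that pattern for a QAsymm8 buffer:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical miniature of the Decoder<float>/Encoder<float> idea: the
    // kernel computes in float; quantisation stays inside the accessors.
    struct QAsymm8Decoder
    {
        const uint8_t* data;
        float scale;
        int32_t offset;

        float Get(unsigned int index) const
        {
            return scale * (static_cast<int32_t>(data[index]) - offset);  // dequantise
        }
    };

    struct QAsymm8Encoder
    {
        uint8_t* data;
        float scale;
        int32_t offset;

        void Set(unsigned int index, float value)
        {
            const int32_t q = static_cast<int32_t>(std::lround(value / scale)) + offset;
            data[index] = static_cast<uint8_t>(std::min(255, std::max(0, q)));  // requantise + clamp
        }
    };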
diff --git a/src/backends/reference/workloads/RefResizeWorkload.hpp b/src/backends/reference/workloads/RefResizeWorkload.hpp
new file mode 100644
index 0000000000..1ddfcdfba8
--- /dev/null
+++ b/src/backends/reference/workloads/RefResizeWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+ using BaseWorkload<ResizeQueueDescriptor>::BaseWorkload;
+ virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 3a094c8a32..4bdf05daa8 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -40,6 +40,7 @@
#include "RefPreluWorkload.hpp"
#include "RefQuantizeWorkload.hpp"
#include "RefResizeBilinearWorkload.hpp"
+#include "RefResizeWorkload.hpp"
#include "RefRsqrtWorkload.hpp"
#include "RefReshapeWorkload.hpp"
#include "RefSplitterWorkload.hpp"
@@ -49,7 +50,7 @@
#include "RefSpaceToDepthWorkload.hpp"
#include "RefTransposeConvolution2dWorkload.hpp"
#include "RefWorkloadUtils.hpp"
-#include "ResizeBilinear.hpp"
+#include "Resize.hpp"
#include "Softmax.hpp"
#include "Splitter.hpp"
#include "TensorBufferArrayView.hpp" \ No newline at end of file
diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/Resize.cpp
index 70a051492a..0e0bdd7597 100644
--- a/src/backends/reference/workloads/ResizeBilinear.cpp
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "ResizeBilinear.hpp"
+#include "Resize.hpp"
#include "TensorBufferArrayView.hpp"
@@ -25,13 +25,14 @@ inline float Lerp(float a, float b, float w)
return w * b + (1.f - w) * a;
}
-}
+}// anonymous namespace
-void ResizeBilinear(Decoder<float>& in,
- const TensorInfo& inputInfo,
- Encoder<float>& out,
- const TensorInfo& outputInfo,
- DataLayoutIndexed dataLayout)
+void Resize(Decoder<float>& in,
+ const TensorInfo& inputInfo,
+ Encoder<float>& out,
+ const TensorInfo& outputInfo,
+ DataLayoutIndexed dataLayout,
+ armnn::ResizeMethod resizeMethod)
{
// We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
// image is projected into the input image to figure out the interpolants and weights. Note that this
@@ -83,22 +84,43 @@ void ResizeBilinear(Decoder<float>& in,
const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u);
const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u);
- // Interpolation
- in[dataLayout.GetIndex(inputShape, n, c, y0, x0)];
- float input1 = in.Get();
- in[dataLayout.GetIndex(inputShape, n, c, y0, x1)];
- float input2 = in.Get();
- in[dataLayout.GetIndex(inputShape, n, c, y1, x0)];
- float input3 = in.Get();
- in[dataLayout.GetIndex(inputShape, n, c, y1, x1)];
- float input4 = in.Get();
-
- const float ly0 = Lerp(input1, input2, xw); // lerp along row y0.
- const float ly1 = Lerp(input3, input4, xw); // lerp along row y1.
- const float l = Lerp(ly0, ly1, yw);
-
+ float interpolatedValue;
+ switch (resizeMethod)
+ {
+ case armnn::ResizeMethod::Bilinear:
+ {
+ in[dataLayout.GetIndex(inputShape, n, c, y0, x0)];
+ float input1 = in.Get();
+ in[dataLayout.GetIndex(inputShape, n, c, y0, x1)];
+ float input2 = in.Get();
+ in[dataLayout.GetIndex(inputShape, n, c, y1, x0)];
+ float input3 = in.Get();
+ in[dataLayout.GetIndex(inputShape, n, c, y1, x1)];
+ float input4 = in.Get();
+
+ const float ly0 = Lerp(input1, input2, xw); // lerp along row y0.
+ const float ly1 = Lerp(input3, input4, xw); // lerp along row y1.
+ interpolatedValue = Lerp(ly0, ly1, yw);
+ break;
+ }
+ case armnn::ResizeMethod::NearestNeighbor:
+ default:
+ {
+ auto distance0 = std::sqrt(std::pow(fix - boost::numeric_cast<float>(x0), 2) +
+ std::pow(fiy - boost::numeric_cast<float>(y0), 2));
+ auto distance1 = std::sqrt(std::pow(fix - boost::numeric_cast<float>(x1), 2) +
+ std::pow(fiy - boost::numeric_cast<float>(y1), 2));
+
+ unsigned int xNearest = distance0 <= distance1 ? x0 : x1;
+ unsigned int yNearest = distance0 <= distance1 ? y0 : y1;
+
+ in[dataLayout.GetIndex(inputShape, n, c, yNearest, xNearest)];
+ interpolatedValue = in.Get();
+ break;
+ }
+ }
out[dataLayout.GetIndex(outputShape, n, c, y, x)];
- out.Set(l);
+ out.Set(interpolatedValue);
}
}
}
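Pulling the new kernel out of its surroundings, a self-contained single-channel sketch of what Resize now computes; the real code additionally loops over batch and channel and reads/writes through Decoder/Encoder:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    enum class Method { Bilinear, NearestNeighbor };

    // Single-channel float distillation of the Resize kernel above. Both
    // methods project the top-left corner of each output texel into the
    // input; bilinear lerps the four neighbours, nearest-neighbor picks
    // the closer of the (x0,y0)/(x1,y1) corners by Euclidean distance.
    std::vector<float> Resize(const std::vector<float>& in, unsigned int inW, unsigned int inH,
                              unsigned int outW, unsigned int outH, Method method)
    {
        const float scaleX = static_cast<float>(inW) / outW;
        const float scaleY = static_cast<float>(inH) / outH;
        std::vector<float> out(outW * outH);

        for (unsigned int y = 0; y < outH; ++y)
        {
            const float fiy = y * scaleY;                              // projected y
            const unsigned int y0 = static_cast<unsigned int>(std::floor(fiy));
            const unsigned int y1 = std::min(y0 + 1, inH - 1u);
            const float yw = fiy - y0;                                 // weight along y

            for (unsigned int x = 0; x < outW; ++x)
            {
                const float fix = x * scaleX;                          // projected x
                const unsigned int x0 = static_cast<unsigned int>(std::floor(fix));
                const unsigned int x1 = std::min(x0 + 1, inW - 1u);
                const float xw = fix - x0;                             // weight along x

                float value;
                if (method == Method::Bilinear)
                {
                    const float ly0 = (1.f - xw) * in[y0 * inW + x0] + xw * in[y0 * inW + x1];
                    const float ly1 = (1.f - xw) * in[y1 * inW + x0] + xw * in[y1 * inW + x1];
                    value = (1.f - yw) * ly0 + yw * ly1;
                }
                else // NearestNeighbor
                {
                    const float d0 = std::hypot(fix - x0, fiy - y0);
                    const float d1 = std::hypot(fix - x1, fiy - y1);
                    value = (d0 <= d1) ? in[y0 * inW + x0] : in[y1 * inW + x1];
                }
                out[y * outW + x] = value;
            }
        }
        return out;
    }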
diff --git a/src/backends/reference/workloads/Resize.hpp b/src/backends/reference/workloads/Resize.hpp
new file mode 100644
index 0000000000..8bd8999e5d
--- /dev/null
+++ b/src/backends/reference/workloads/Resize.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include <armnn/Tensor.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+namespace armnn
+{
+
+void Resize(Decoder<float>& in,
+ const TensorInfo& inputInfo,
+ Encoder<float>& out,
+ const TensorInfo& outputInfo,
+ armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW,
+ ResizeMethod resizeMethod = ResizeMethod::NearestNeighbor);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
deleted file mode 100644
index ad2e487f5a..0000000000
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "BaseIterator.hpp"
-#include <armnn/Tensor.hpp>
-
-#include <DataLayoutIndexed.hpp>
-
-namespace armnn
-{
-
-void ResizeBilinear(Decoder<float>& in,
- const TensorInfo& inputInfo,
- Encoder<float>& out,
- const TensorInfo& outputInfo,
- armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW);
-
-} //namespace armnn