about summary refs log tree commit diff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2023-06-01 16:15:13 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2023-06-22 14:12:28 +0000
commitf77cab57b3eca1425384d4d5bfe44d76fc7023b9 (patch)
treee51066218697f652a0bc40b618ca279a0f7be3f6 /src/backends/backendsCommon
parentfd5dbe98c780ae7bd390fae536c2dc636e7b61cc (diff)
downloadarmnn-f77cab57b3eca1425384d4d5bfe44d76fc7023b9.tar.gz
IVGCVSW-7785 Extend support for 3D tensors BATCH_TO_SPACE and SPACE_TO_BATCH in CpuRef
* Both layers were assuming 4D tensors; now 3D is supported too.
* Remove some unnecessary includes.
* Add Unit Tests.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I7bdd11e4936a27cd97ec65fd915e6ccaa1494cff
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp105
-rw-r--r--src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp27
-rw-r--r--src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp121
-rw-r--r--src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp15
4 files changed, 243 insertions, 25 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 6a5963ddcb..d4ae08d874 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1815,47 +1815,66 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
- ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
- ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
-
- if (m_Parameters.m_BlockShape.size() != 2)
- {
- throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
- }
-
if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
{
throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
"dimensions as Block Shape.");
}
- const TensorShape& inputShape = inputTensorInfo.GetShape();
-
- std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
- std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
+ if (m_Parameters.m_BlockShape.size() == 2)
+ {
+ ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
+ ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
+ }
+ else if (m_Parameters.m_BlockShape.size() == 1)
+ {
+ ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 3, "input");
+ ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 3, "output");
+ }
+ else
+ {
+ throw InvalidArgumentException(descriptorName + ": Invalid Block and Crops size.");
+ }
+ // Check input + padding and output have the same number of elements
DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[dimensionIndices.GetHeightIndex()] +
+ m_Parameters.m_PadList[0].first + m_Parameters.m_PadList[0].second;
+ const unsigned int inputWidth = (inputTensorInfo.GetNumDimensions() == 3) ? 1 :
+ inputTensorInfo.GetShape()[dimensionIndices.GetWidthIndex()] +
+ m_Parameters.m_PadList[1].first + m_Parameters.m_PadList[1].second;
- const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
- widthPad.first + widthPad.second;
- const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
- heightPad.first + heightPad.second;
+ const int channelsIndex_int = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : -1;
+ const unsigned int channelsIndex = channelsIndex_int < 0 ?
+ static_cast<unsigned int>(channelsIndex_int) + inputTensorInfo.GetNumDimensions()
+ : static_cast<unsigned int>(channelsIndex_int);
- const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
- inputShape[dimensionIndices.GetChannelsIndex()];
- const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
+ const unsigned int numInputElements = inputTensorInfo.GetShape()[0] *
+ inputHeight *
+ inputWidth *
+ inputTensorInfo.GetShape()[channelsIndex];
- if (numOutputElements != numInputElements)
+ if (outputTensorInfo.GetNumElements() != numInputElements)
{
throw InvalidArgumentException(descriptorName + ": Input tensor has " +
- to_string(numInputElements) + " after padding but output tensor has " +
- to_string(numOutputElements) + " elements.");
+ to_string(numInputElements) + " after padding but output tensor has " +
+ to_string(outputTensorInfo.GetNumElements()) + " elements.");
}
- if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
+ // In a 4D tensor, there will be 2 spatialDimensions (H and W), and the for loop will run twice.
+ // In a 3D tensor, there will be 1 spatialDimensions, and the for loop will run once.
+ unsigned int firstSpatialDimension = m_Parameters.m_DataLayout == DataLayout::NCHW ? 2 : 1;
+ for (unsigned int i = 0; i < m_Parameters.m_BlockShape.size(); ++i)
{
- throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
- "divisible by Block Shape in all spatial dimensions");
+ unsigned int spatialDimension = firstSpatialDimension + i;
+ auto inputSize = inputTensorInfo.GetShape()[spatialDimension] +
+ m_Parameters.m_PadList[i].first +
+ m_Parameters.m_PadList[i].second;
+ if (inputSize % m_Parameters.m_BlockShape[i] != 0)
+ {
+ throw InvalidArgumentException(descriptorName + ": Input dimension size after padding must be "
+ "divisible by Block Shape in dimension: " + to_string(spatialDimension) + ".");
+ }
}
std::vector<DataType> supportedTypes =
@@ -2472,6 +2491,42 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+ if (m_Parameters.m_BlockShape.size() != m_Parameters.m_Crops.size())
+ {
+ throw InvalidArgumentException(descriptorName + ": Crops must contain the same number of "
+ "dimensions as Block Shape.");
+ }
+
+ if (m_Parameters.m_BlockShape.size() == 2)
+ {
+ ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
+ ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
+ }
+ else if (m_Parameters.m_BlockShape.size() == 1)
+ {
+ ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 3, "input");
+ ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 3, "output");
+ }
+ else
+ {
+ throw InvalidArgumentException(descriptorName + ": Invalid Block and Crops size.");
+ }
+
+ // In a 4D tensor, there will be 2 spatialDimensions (H and W), and the for loop will run twice.
+ // In a 3D tensor, there will be 1 spatialDimensions, and the for loop will run once.
+ unsigned int firstSpatialDimension = m_Parameters.m_DataLayout == DataLayout::NCHW ? 2 : 1;
+ for (unsigned int i = 0; i < m_Parameters.m_BlockShape.size(); ++i)
+ {
+ unsigned int spatialDimension = firstSpatialDimension + i;
+ unsigned int cropSize = m_Parameters.m_Crops[i].first + m_Parameters.m_Crops[i].second;
+ unsigned int outputSize = inputTensorInfo.GetShape()[spatialDimension] * m_Parameters.m_BlockShape[i];
+ if (cropSize > outputSize)
+ {
+ throw InvalidArgumentException(descriptorName + ": CropSize must be less than or equal to the uncropped"
+ "outputSize in dimension: " + to_string(spatialDimension) + ".");
+ }
+ }
+
std::vector<DataType> supportedTypes =
{
DataType::BFloat16,
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index b3007771c9..4f8b7d0193 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -279,6 +279,33 @@ LayerTestResult<T, 4> BatchToSpaceNdNhwcTest7(
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> BatchToSpaceNdNhwcTest8(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ const unsigned int inputShape[] = {4, 2, 1};
+ const unsigned int outputShape[] = {1, 8, 1};
+
+ std::vector<float> input({
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f
+ });
+
+ std::vector<float> expectedOutput({
+ 1.0f, 3.0f, 5.0f, 7.0f,
+ 2.0f, 4.0f, 6.0f, 8.0f
+ });
+
+ std::vector<unsigned int> blockShape {4};
+ std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}};
+
+ return BatchToSpaceNdHelper<ArmnnType, 3, 3>(workloadFactory, memoryManager, tensorHandleFactory,
+ armnn::DataLayout::NHWC, inputShape, input, blockShape,
+ crops, outputShape, expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchToSpaceNdNchwTest1(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 92876e18bd..4e40692c8c 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -88,6 +88,59 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
outputTensorInfo.GetShape());
}
+template<typename T>
+LayerTestResult<T, 3> SpaceToBatchNd3DTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::TensorInfo& inputTensorInfo,
+ armnn::TensorInfo& outputTensorInfo,
+ std::vector<float>& inputData,
+ std::vector<float>& outputExpectedData,
+ armnn::SpaceToBatchNdQueueDescriptor descriptor,
+ const float qScale = 1.0f,
+ const int32_t qOffset = 0)
+{
+ IgnoreUnused(memoryManager);
+
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
+ }
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::SpaceToBatchNd,
+ descriptor,
+ info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 3>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -254,6 +307,44 @@ LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> SpaceToBatchNdSimple3DTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::DataLayout dataLayout = armnn::DataLayout::NHWC)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {1, 8, 1};
+ unsigned int outputShape[] = {4, 2, 1};
+
+ armnn::SpaceToBatchNdQueueDescriptor desc;
+ desc.m_Parameters.m_DataLayout = dataLayout;
+ desc.m_Parameters.m_BlockShape = {4};
+ desc.m_Parameters.m_PadList = {{0, 0}};
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 3.0f, 5.0f, 7.0f,
+ 2.0f, 4.0f, 6.0f, 8.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f
+ });
+
+ return SpaceToBatchNd3DTestImpl<T>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdSimpleNhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -463,6 +554,16 @@ LayerTestResult<float, 4> SpaceToBatchNdPaddingNhwcFloat32Test(
tensorHandleFactory);
}
+LayerTestResult<float, 3> SpaceToBatchNdSimpleNhwc3DFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return SpaceToBatchNdSimple3DTest<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleNhwcFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -503,6 +604,16 @@ LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingNhwcFloat16Test(
tensorHandleFactory);
}
+LayerTestResult<armnn::Half, 3> SpaceToBatchNdSimpleNhwc3DFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return SpaceToBatchNdSimple3DTest<armnn::DataType::Float16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -543,6 +654,16 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
tensorHandleFactory);
}
+LayerTestResult<uint8_t, 3> SpaceToBatchNdSimpleNhwc3DUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return SpaceToBatchNdSimple3DTest<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp
index 7768b162f2..4e87d6ab6c 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp
@@ -91,6 +91,11 @@ LayerTestResult<float, 4> SpaceToBatchNdPaddingNhwcFloat32Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+LayerTestResult<float, 3> SpaceToBatchNdSimpleNhwc3DFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleNhwcFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -111,6 +116,11 @@ LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingNhwcFloat16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+LayerTestResult<armnn::Half, 3> SpaceToBatchNdSimpleNhwc3DFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -131,6 +141,11 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+LayerTestResult<uint8_t, 3> SpaceToBatchNdSimpleNhwc3DUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,