path: root/src/backends/neon/workloads
author    Teresa Charlin <teresa.charlinreyes@arm.com>  2023-06-19 12:06:19 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>  2023-07-10 11:35:02 +0000
commit    2ea403d130db0d2853d5c43c29b5112893efc2bf (patch)
tree      b2e64805b95825c3cd29f05c5838b9d71124bd4b /src/backends/neon/workloads
parent    944fb508b1c30415e423b8916849c66a13867ea4 (diff)
download  armnn-2ea403d130db0d2853d5c43c29b5112893efc2bf.tar.gz
IVGCVSW-7785 3D tensors in BATCH_TO_SPACE and SPACE_TO_BATCH in CpuAcc & GpuAcc
* Add Reshape layers before and after to extend support for 3D tensors,
  as ACL only supports 4D tensors for those layers
* Add Unit Tests

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I4431185ce3a3b2f595d2a79bdda7095212d1c52d
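The idea behind the patch: ACL's NEBatchToSpaceLayer and NESpaceToBatchLayer only accept 4D tensors, so rank-3 inputs are reshaped to rank 4 by inserting a width dimension of size 1, the 4D ACL layer runs with blockWidth = 1, and a second reshape restores the original rank. A minimal sketch of that shape mapping, assuming ACL's right-to-left dimension order; the Layout enum and To4dShape helper below are illustrative only and not part of the patch:

    #include <arm_compute/core/TensorShape.h>
    #include <stdexcept>

    enum class Layout { NHWC, NCHW }; // stand-in for armnn::DataLayout

    // Insert W = 1 into a rank-3 ACL shape. ACL shapes list dimensions right to left:
    //   NHWC rank-3 {C, H, N} -> rank-4 {C, W=1, H, N}
    //   NCHW rank-3 {H, C, N} -> rank-4 {W=1, H, C, N}
    arm_compute::TensorShape To4dShape(const arm_compute::TensorShape& in, Layout layout)
    {
        if (layout == Layout::NHWC)
        {
            return arm_compute::TensorShape(in.x(), 1, in.y(), in.z());
        }
        if (layout == Layout::NCHW)
        {
            return arm_compute::TensorShape(1, in.x(), in.y(), in.z());
        }
        throw std::invalid_argument("Unsupported or unknown DataLayout");
    }

With NHWC this makes an ArmNN [N, H, C] tensor look like [N, H, 1, C] to ACL, which is what the workload constructors below do via set_tensor_shape before wiring NEReshapeLayer instances around the 4D layer.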
Diffstat (limited to 'src/backends/neon/workloads')
-rw-r--r--  src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp  149
-rw-r--r--  src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp   11
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp  172
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.hpp   11
4 files changed, 272 insertions, 71 deletions
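For context, a hedged sketch of how the extended validate path could be exercised for a rank-3 NHWC tensor; the shapes and descriptor values are illustrative only (the real coverage lives in the added unit tests), while TensorInfo, BatchToSpaceNdDescriptor and NeonBatchToSpaceNdWorkloadValidate are existing ArmNN types and the function touched by this patch:

    #include "NeonBatchToSpaceNdWorkload.hpp" // declares NeonBatchToSpaceNdWorkloadValidate

    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    arm_compute::Status ValidateRank3BatchToSpaceExample()
    {
        // Rank-3 NHWC tensors: [N, H, C]. Batch 4 folds back into H with block 2.
        armnn::TensorInfo input (armnn::TensorShape({4, 2, 1}), armnn::DataType::Float32);
        armnn::TensorInfo output(armnn::TensorShape({2, 4, 1}), armnn::DataType::Float32);

        armnn::BatchToSpaceNdDescriptor descriptor;
        descriptor.m_BlockShape = {2};       // only H is blocked for rank-3 inputs
        descriptor.m_Crops      = {{0, 0}};  // no cropping
        descriptor.m_DataLayout = armnn::DataLayout::NHWC;

        // Internally this now validates the reshape [N, H, C] -> [N, H, 1, C], the 4D
        // ACL BatchToSpace layer with blockWidth = 1, and the reshape back, returning
        // the three statuses aggregated into one.
        return armnn::NeonBatchToSpaceNdWorkloadValidate(input, output, descriptor);
    }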
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
index 05b5899bdd..f66849a88d 100644
--- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
@@ -1,17 +1,12 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonBatchToSpaceNdWorkload.hpp"
-#include "NeonWorkloadUtils.hpp"
-
-#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <ResolveType.hpp>
-
namespace armnn
{
@@ -21,21 +16,71 @@ arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const BatchToSpaceNdDescriptor& descriptor)
{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+ arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+
+ arm_compute::Status statusBatchToSpace = arm_compute::Status(arm_compute::ErrorCode::OK);
+ arm_compute::Status statusReshapeInput = arm_compute::Status(arm_compute::ErrorCode::OK);
+ arm_compute::Status statusReshapeOutput = arm_compute::Status(arm_compute::ErrorCode::OK);
+
+ arm_compute::TensorInfo aclReshapeInputInfo = aclInputInfo;
+ arm_compute::TensorInfo aclReshapeOutputInfo = aclOutputInfo;
+
+ // When a spatial dimension is missing (rank=3), set W to 1
+ const unsigned int rank = input.GetNumDimensions();
+ if (rank == 3)
+ {
+ const arm_compute::TensorShape inputShape = aclInputInfo.tensor_shape();
+ const arm_compute::TensorShape outputShape = aclOutputInfo.tensor_shape();
+
+ if (descriptor.m_DataLayout == armnn::DataLayout::NHWC)
+ {
+ // In ACL dimensions are right to left: C, W, H, N
+ aclReshapeInputInfo.set_tensor_shape({inputShape.x(), 1, inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({outputShape.x(), 1, outputShape.y(), outputShape.z()});
+ }
+ else if (descriptor.m_DataLayout == armnn::DataLayout::NCHW)
+ {
+ // In ACL dimensions are right to left: W, H, C, N
+ aclReshapeInputInfo.set_tensor_shape({1, inputShape.x(), inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({1, outputShape.x(), outputShape.y(), outputShape.z()});
+ }
+ else
+ {
+ throw InvalidArgumentException("Unsupported or unknown DataLayout", CHECK_LOCATION());
+ }
+
+ statusReshapeInput = arm_compute::NEReshapeLayer::validate(&aclInputInfo, &aclReshapeInputInfo);
+ statusReshapeOutput = arm_compute::NEReshapeLayer::validate(&aclReshapeOutputInfo, &aclOutputInfo);
+ }
- // ArmNN blockShape is [H, W] Cl asks for W, H
+ // ArmNN blockShape is [H, W]; ACL asks for W, H
int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
- int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+ int32_t blockWidth = (rank == 3) ? 1 : armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+
+ const arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(descriptor, rank);
- const arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(descriptor);
+ statusBatchToSpace = arm_compute::NEBatchToSpaceLayer::validate(rank == 3 ? &aclReshapeInputInfo : &aclInputInfo,
+ blockWidth,
+ blockHeight,
+ rank == 3 ? &aclReshapeOutputInfo : &aclOutputInfo,
+ cropInfo);
- const arm_compute::Status aclStatus = arm_compute::NEBatchToSpaceLayer::validate(&aclInputInfo,
- blockWidth,
- blockHeight,
- &aclOutputInfo,
- cropInfo);
- return aclStatus;
+ if (statusReshapeInput.error_code() == arm_compute::ErrorCode::OK &&
+ statusReshapeOutput.error_code() == arm_compute::ErrorCode::OK &&
+ statusBatchToSpace.error_code() == arm_compute::ErrorCode::OK)
+ {
+ return arm_compute::Status(arm_compute::ErrorCode::OK,
+ "All BatchToSpace layers validate status OK.");
+ }
+ else
+ {
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
+ "BatchToSpace layer validate status failed."
+ + statusBatchToSpace.error_description()
+ + statusReshapeInput.error_description()
+ + statusReshapeOutput.error_description());
+ }
}
NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDescriptor& descriptor,
@@ -50,33 +95,85 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue
m_Data.ValidateInputsOutputs("NeonBatchToSpaceNdWorkload", 1, 1);
- arm_compute::ITensor& input =
- armnn::PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output =
- armnn::PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
- // ArmNN blockShape is [H, W] Cl asks for W, H
+ arm_compute::TensorInfo aclReshapeInputInfo = BuildArmComputeTensorInfo(info.m_InputTensorInfos[0],
+ m_Data.m_Parameters.m_DataLayout);
+ arm_compute::TensorInfo aclReshapeOutputInfo = BuildArmComputeTensorInfo(info.m_OutputTensorInfos[0],
+ m_Data.m_Parameters.m_DataLayout);
+
+ const unsigned int rank = info.m_InputTensorInfos[0].GetNumDimensions();
+ if (rank == 3)
+ {
+ const arm_compute::TensorShape inputShape = input.info()->tensor_shape();
+ const arm_compute::TensorShape outputShape = output.info()->tensor_shape();
+
+ // When a spatial dimension is missing, set W to 1
+ if (m_Data.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
+ {
+ // In ACL dimensions are right to left: C, W, H, N
+ aclReshapeInputInfo.set_tensor_shape({inputShape.x(), 1, inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({outputShape.x(), 1, outputShape.y(), outputShape.z()});
+ }
+ else if (m_Data.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
+ {
+ // In ACL dimensions are right to left: W, H, C, N
+ aclReshapeInputInfo.set_tensor_shape({1, inputShape.x(), inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({1, outputShape.x(), outputShape.y(), outputShape.z()});
+ }
+ else
+ {
+ throw InvalidArgumentException("Unsupported or unknown DataLayout", CHECK_LOCATION());
+ }
+
+ m_ReshapeInputTensor.allocator()->init(aclReshapeInputInfo);
+ m_ReshapeOutputTensor.allocator()->init(aclReshapeOutputInfo);
+
+ InitialiseArmComputeTensorEmpty(m_ReshapeInputTensor);
+ InitialiseArmComputeTensorEmpty(m_ReshapeOutputTensor);
+
+ m_LayerReshapeInput.reset(new arm_compute::NEReshapeLayer());
+ m_LayerReshapeOutput.reset(new arm_compute::NEReshapeLayer());
+
+ m_LayerReshapeInput->configure(&input, &m_ReshapeInputTensor);
+ m_LayerReshapeOutput->configure(&m_ReshapeOutputTensor, &output);
+ }
+
+ // ArmNN blockShape is [H, W]; ACL asks for W, H
int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_Parameters.m_BlockShape[1]);
+ int32_t blockWidth = (rank == 3) ? 1 : armnn::numeric_cast<int32_t>(descriptor.m_Parameters.m_BlockShape[1]);
- const arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(descriptor.m_Parameters);
+ const arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(descriptor.m_Parameters, rank);
m_Layer.reset(new arm_compute::NEBatchToSpaceLayer());
- m_Layer->configure(&input, blockWidth, blockHeight, &output, cropInfo);
+ m_Layer->configure(rank == 3 ? &m_ReshapeInputTensor : &input,
+ blockWidth,
+ blockHeight,
+ rank == 3 ? &m_ReshapeOutputTensor : &output,
+ cropInfo);
m_Layer->prepare();
}
void NeonBatchToSpaceNdWorkload::Execute() const
{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonBatchToSpaceNdWorkload_Execute", this->GetGuid());
+ if (m_LayerReshapeInput)
+ {
+ m_LayerReshapeInput->run();
+ }
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSpaceToBatchNdWorkload_Execute", this->GetGuid());
m_Layer->run();
}
+ if (m_LayerReshapeOutput)
+ {
+ m_LayerReshapeOutput->run();
+ }
}
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp
index 55f773e42f..ef5bd138c8 100644
--- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp
@@ -1,16 +1,15 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/Tensor.hpp>
-#include <armnn/Descriptors.hpp>
-
#include "NeonBaseWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
#include <arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
namespace armnn
{
@@ -30,6 +29,10 @@ public:
private:
mutable std::unique_ptr<arm_compute::NEBatchToSpaceLayer> m_Layer;
+ mutable std::unique_ptr<arm_compute::NEReshapeLayer> m_LayerReshapeInput;
+ mutable std::unique_ptr<arm_compute::NEReshapeLayer> m_LayerReshapeOutput;
+ arm_compute::Tensor m_ReshapeInputTensor;
+ arm_compute::Tensor m_ReshapeOutputTensor;
};
}
diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
index e0adc6220e..291fa8110e 100644
--- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
@@ -5,13 +5,8 @@
#include "NeonSpaceToBatchNdWorkload.hpp"
-#include "NeonWorkloadUtils.hpp"
-
-#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <ResolveType.hpp>
-
namespace armnn
{
@@ -21,24 +16,77 @@ arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const SpaceToBatchNdDescriptor& descriptor)
{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+ arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+
+ arm_compute::Status statusSpaceToBatch = arm_compute::Status(arm_compute::ErrorCode::OK);
+ arm_compute::Status statusReshapeInput = arm_compute::Status(arm_compute::ErrorCode::OK);
+ arm_compute::Status statusReshapeOutput = arm_compute::Status(arm_compute::ErrorCode::OK);
+
+ arm_compute::TensorInfo aclReshapeInputInfo = aclInputInfo;
+ arm_compute::TensorInfo aclReshapeOutputInfo = aclOutputInfo;
+
+ // When a spatial dimension is missing (rank=3), set W to 1
+ const unsigned int rank = input.GetNumDimensions();
+ if (rank == 3)
+ {
+ const arm_compute::TensorShape inputShape = aclInputInfo.tensor_shape();
+ const arm_compute::TensorShape outputShape = aclOutputInfo.tensor_shape();
+
+ if (descriptor.m_DataLayout == armnn::DataLayout::NHWC)
+ {
+ // In ACL dimensions are right to left: C, W, H, N
+ aclReshapeInputInfo.set_tensor_shape({inputShape.x(), 1, inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({outputShape.x(), 1, outputShape.y(), outputShape.z()});
+ }
+ else if (descriptor.m_DataLayout == armnn::DataLayout::NCHW)
+ {
+ // In ACL dimensions are right to left: W, H, C, N
+ aclReshapeInputInfo.set_tensor_shape({1, inputShape.x(), inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({1, outputShape.x(), outputShape.y(), outputShape.z()});
+ }
+ else
+ {
+ throw InvalidArgumentException("Unsupported or unknown DataLayout", CHECK_LOCATION());
+ }
+
+ statusReshapeInput = arm_compute::NEReshapeLayer::validate(&aclInputInfo, &aclReshapeInputInfo);
+ statusReshapeOutput = arm_compute::NEReshapeLayer::validate(&aclReshapeOutputInfo, &aclOutputInfo);
+ }
- // ArmNN blockShape is [H, W] Cl asks for W, H
+ // ArmNN blockShape is [H, W]; ACL asks for W, H
int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
- int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
-
- arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
- descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
- arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(
- descriptor.m_PadList[1].second, descriptor.m_PadList[0].second);
-
- return arm_compute::NESpaceToBatchLayer::validate(&aclInputInfo,
- blockWidth,
- blockHeight,
- paddingLeftTop,
- paddingRightBottom,
- &aclOutputInfo);
+ int32_t blockWidth = (rank == 3) ? 1 : armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+
+ unsigned int padLeft = (rank == 3) ? 0 : descriptor.m_PadList[1].first;
+ unsigned int padRight = (rank == 3) ? 0 : descriptor.m_PadList[1].second;
+ arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(padLeft,
+ descriptor.m_PadList[0].first);
+ arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(padRight,
+ descriptor.m_PadList[0].second);
+
+ statusSpaceToBatch = arm_compute::NESpaceToBatchLayer::validate(rank == 3 ? &aclReshapeInputInfo : &aclInputInfo,
+ blockWidth,
+ blockHeight,
+ paddingLeftTop,
+ paddingRightBottom,
+ rank == 3 ? &aclReshapeOutputInfo : &aclOutputInfo);
+
+ if (statusReshapeInput.error_code() == arm_compute::ErrorCode::OK &&
+ statusReshapeOutput.error_code() == arm_compute::ErrorCode::OK &&
+ statusSpaceToBatch.error_code() == arm_compute::ErrorCode::OK)
+ {
+ return arm_compute::Status(arm_compute::ErrorCode::OK,
+ "All SpaceToBatch layers validate status OK.");
+ }
+ else
+ {
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
+ "SpaceToBatch layer validate status failed."
+ + statusSpaceToBatch.error_description()
+ + statusReshapeInput.error_description()
+ + statusReshapeOutput.error_description());
+ }
}
NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueueDescriptor& descriptor,
@@ -53,41 +101,91 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue
m_Data.ValidateInputsOutputs("NESpaceToBatchNdWorkload", 1, 1);
- arm_compute::ITensor& input =
- PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output =
- PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
-
- // ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
-
- arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
- m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
- arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(
- m_Data.m_Parameters.m_PadList[1].second, m_Data.m_Parameters.m_PadList[0].second);
+ arm_compute::ITensor& input = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
+ arm_compute::TensorInfo aclReshapeInputInfo = BuildArmComputeTensorInfo(info.m_InputTensorInfos[0],
+ m_Data.m_Parameters.m_DataLayout);
+ arm_compute::TensorInfo aclReshapeOutputInfo = BuildArmComputeTensorInfo(info.m_OutputTensorInfos[0],
+ m_Data.m_Parameters.m_DataLayout);
+
+ const unsigned int rank = info.m_InputTensorInfos[0].GetNumDimensions();
+ if (rank == 3)
+ {
+ const arm_compute::TensorShape inputShape = input.info()->tensor_shape();
+ const arm_compute::TensorShape outputShape = output.info()->tensor_shape();
+
+ // When a spatial dimension is missing, set W to 1
+ if (m_Data.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
+ {
+ // In ACL dimensions are right to left: C, W, H, N
+ aclReshapeInputInfo.set_tensor_shape({inputShape.x(), 1, inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({outputShape.x(), 1, outputShape.y(), outputShape.z()});
+ }
+ else if (m_Data.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
+ {
+ // In ACL dimensions are right to left: W, H, C, N
+ aclReshapeInputInfo.set_tensor_shape({1, inputShape.x(), inputShape.y(), inputShape.z()});
+ aclReshapeOutputInfo.set_tensor_shape({1, outputShape.x(), outputShape.y(), outputShape.z()});
+ }
+ else
+ {
+ throw InvalidArgumentException("Unsupported or unknown DataLayout", CHECK_LOCATION());
+ }
+
+ m_ReshapeInputTensor.allocator()->init(aclReshapeInputInfo);
+ m_ReshapeOutputTensor.allocator()->init(aclReshapeOutputInfo);
+
+ InitialiseArmComputeTensorEmpty(m_ReshapeInputTensor);
+ InitialiseArmComputeTensorEmpty(m_ReshapeOutputTensor);
+
+ m_LayerReshapeInput.reset(new arm_compute::NEReshapeLayer());
+ m_LayerReshapeOutput.reset(new arm_compute::NEReshapeLayer());
+
+ m_LayerReshapeInput->configure(&input, &m_ReshapeInputTensor);
+ m_LayerReshapeOutput->configure(&m_ReshapeOutputTensor, &output);
+ }
+
+ // ArmNN blockShape is [H, W]; ACL asks for W, H
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = (rank == 3) ? 1 : armnn::numeric_cast<int32_t>(descriptor.m_Parameters.m_BlockShape[1]);
+
+ unsigned int padLeft = (rank == 3) ? 0 : descriptor.m_Parameters.m_PadList[1].first;
+ unsigned int padRight = (rank == 3) ? 0 : descriptor.m_Parameters.m_PadList[1].second;
+ arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(padLeft,
+ descriptor.m_Parameters.m_PadList[0].first);
+ arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(padRight,
+ descriptor.m_Parameters.m_PadList[0].second);
+
m_Layer.reset(new arm_compute::NESpaceToBatchLayer());
- m_Layer->configure(&input,
+ m_Layer->configure((rank == 3) ? &m_ReshapeInputTensor : &input,
blockWidth,
blockHeight,
paddingLeftTop,
paddingRightBottom,
- &output);
+ (rank == 3) ? &m_ReshapeOutputTensor : &output);
m_Layer->prepare();
}
void NeonSpaceToBatchNdWorkload::Execute() const
{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSpaceToBatchNdWorkload_Execute", this->GetGuid());
+ if (m_LayerReshapeInput)
+ {
+ m_LayerReshapeInput->run();
+ }
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSpaceToBatchNdWorkload_Execute", this->GetGuid());
m_Layer->run();
}
+ if (m_LayerReshapeOutput)
+ {
+ m_LayerReshapeOutput->run();
+ }
}
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.hpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.hpp
index 4bd7d2d4a4..35d70d3fed 100644
--- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.hpp
+++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.hpp
@@ -1,16 +1,15 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/Tensor.hpp>
-#include <armnn/Descriptors.hpp>
-
#include "NeonBaseWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
#include <arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
namespace armnn
{
@@ -30,6 +29,10 @@ public:
private:
mutable std::unique_ptr<arm_compute::NESpaceToBatchLayer> m_Layer;
+ mutable std::unique_ptr<arm_compute::NEReshapeLayer> m_LayerReshapeInput;
+ mutable std::unique_ptr<arm_compute::NEReshapeLayer> m_LayerReshapeOutput;
+ arm_compute::Tensor m_ReshapeInputTensor;
+ arm_compute::Tensor m_ReshapeOutputTensor;
};
} //namespace armnn
\ No newline at end of file