aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDeclan-ARM <decmce01@arm.com>2024-02-07 13:07:31 +0000
committerTeresaARM <teresa.charlinreyes@arm.com>2024-02-09 14:01:11 +0000
commit5e90b831fc31605b5734531387837123fb31d0e0 (patch)
treec75cedfaa5f6170262eb3dfcea6e0522d89e3433
parentddbda6a1ed13f7bef7e0dce07a37e91b062ce98a (diff)
downloadarmnn-5e90b831fc31605b5734531387837123fb31d0e0.tar.gz
IVGCVSW-7569 GpuFsa Op: Add Reshape Operator
* Add Reshape EndToEnd tests to all backends Signed-off-by: Declan-ARM <decmce01@arm.com> Change-Id: Ic6d07ba8de0cf3271ed0e4c6d604e070ccb968e3
-rw-r--r--src/backends/cl/test/ClEndToEndTests.cpp20
-rw-r--r--src/backends/gpuFsa/GpuFsaBackend.cpp10
-rw-r--r--src/backends/gpuFsa/GpuFsaLayerSupport.cpp16
-rw-r--r--src/backends/gpuFsa/layers/CMakeLists.txt2
-rw-r--r--src/backends/gpuFsa/layers/GpuFsaReshape.cpp78
-rw-r--r--src/backends/gpuFsa/layers/GpuFsaReshape.hpp21
-rw-r--r--src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp65
-rw-r--r--src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp57
-rw-r--r--src/backends/neon/test/NeonEndToEndTests.cpp20
-rw-r--r--src/backends/reference/test/RefEndToEndTests.cpp21
10 files changed, 282 insertions(+), 28 deletions(-)
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index c48aa8aac0..9e60843177 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -496,6 +496,26 @@ TEST_CASE("ClReshapeEndToEndTestFloat16")
ReshapeEndToEndFloat16<armnn::DataType::Float16>(clDefaultBackends);
}
+TEST_CASE("ClReshapeEndToEndTestInt32")
+{
+ ReshapeEndToEnd<armnn::DataType::Signed32>(clDefaultBackends);
+}
+
+TEST_CASE("ClReshapeEndToEndTestInt16")
+{
+ ReshapeEndToEnd<armnn::DataType::QSymmS16>(clDefaultBackends);
+}
+
+TEST_CASE("ClReshapeEndToEndTestUInt8")
+{
+ ReshapeEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
+}
+
+TEST_CASE("ClReshapeEndToEndTestInt8")
+{
+ ReshapeEndToEnd<armnn::DataType::QAsymmS8>(clDefaultBackends);
+}
+
// Resize Bilinear
TEST_CASE("ClResizeBilinearEndToEndFloatNchwTest")
{
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 29eb1adb6c..8c0aac625d 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -13,7 +13,6 @@
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
-#include <aclCommon/BaseMemoryManager.hpp>
#include <backendsCommon/SubgraphUtils.hpp>
#include <Optimizer.hpp>
@@ -27,6 +26,7 @@
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
#include "layers/GpuFsaElementwiseBinary.hpp"
#include "layers/GpuFsaPooling2d.hpp"
+#include "layers/GpuFsaReshape.hpp"
#include "layers/GpuFsaResize.hpp"
#include "layers/GpuFsaSoftmax.hpp"
@@ -338,6 +338,14 @@ OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgra
GpuFsaPooling2dCreateOp(preCompiledBlobPtr, input, *desc);
break;
}
+ case LayerType::Reshape:
+ {
+ auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&base.GetParameters());
+ GpuFsaReshapeCreateOp(preCompiledBlobPtr, input, *desc);
+
+ break;
+ }
case (LayerType::Resize):
{
auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index d75f18ccdb..2065998434 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -17,6 +17,7 @@
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
#include "layers/GpuFsaElementwiseBinary.hpp"
#include "layers/GpuFsaPooling2d.hpp"
+#include "layers/GpuFsaReshape.hpp"
#include "layers/GpuFsaResize.hpp"
#include "layers/GpuFsaSoftmax.hpp"
#endif
@@ -206,6 +207,21 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
infos[0],
*desc);
}
+ case LayerType::Reshape:
+ {
+ if (infos.size() != 2)
+ {
+ throw InvalidArgumentException("Invalid number of Reshape TensorInfos. "
+ "TensorInfos should be of format: { input, output }.");
+ }
+
+ auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
+
+ FORWARD_LAYER_VALIDATE_FUNC(GpuFsaReshapeValidate,
+ reasonIfUnsupported,
+ infos[0],
+ *desc);
+ }
case LayerType::Resize:
{
if (infos.size() != 2)
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 38d551140b..b3e8d6a2a0 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -18,6 +18,8 @@ list(APPEND armnnGpuFsaBackendLayers_sources
GpuFsaElementwiseBinary.hpp
GpuFsaPooling2d.cpp
GpuFsaPooling2d.hpp
+ GpuFsaReshape.cpp
+ GpuFsaReshape.hpp
GpuFsaResize.cpp
GpuFsaResize.hpp
GpuFsaSoftmax.cpp
diff --git a/src/backends/gpuFsa/layers/GpuFsaReshape.cpp b/src/backends/gpuFsa/layers/GpuFsaReshape.cpp
new file mode 100644
index 0000000000..47d4e42838
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaReshape.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaReshape.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status GpuFsaReshapeValidate(const TensorInfo& input, const ReshapeDescriptor& descriptor)
+{
+ auto compileContext = arm_compute::CLKernelLibrary::get().get_compile_context();
+ auto workloadContext = GpuWorkloadContext(&compileContext);
+
+ GpuWorkloadSketch sketch(&workloadContext);
+
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
+ aclInputInfo.set_are_values_constant(input.IsConstant());
+
+ arm_compute::ITensorInfo* inputInfo = workloadContext.create_tensor_info(aclInputInfo);
+
+ ReshapeAttributes attributes;
+ attributes.shape(BuildArmComputeTensorShape(descriptor.m_TargetShape));
+
+ arm_compute::Status aclStatus = GpuReshape::validate_op(sketch, inputInfo, attributes);
+
+#ifndef NDEBUG
+ if (aclStatus.error_code() != arm_compute::ErrorCode::OK)
+ {
+ std::cout << "GpuFsaReshapeValidate failed: " << aclStatus.error_description() << std::endl;
+ }
+#endif
+
+ return aclStatus;
+}
+
+void GpuFsaReshapeCreateOp(GpuFsaPreCompiledBlob* blob, const TensorInfo& input, const ReshapeDescriptor& descriptor)
+{
+ GpuWorkloadSketch* sketch = blob->sketch.get();
+ GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+
+ std::vector<arm_compute::ITensorInfo*> inputTensorInfos;
+ std::vector<arm_compute::ITensorInfo*> outputTensorInfos;
+
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
+
+ aclInputInfo.set_are_values_constant(input.IsConstant());
+
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
+
+ ReshapeAttributes attributes;
+ attributes.shape(BuildArmComputeTensorShape(descriptor.m_TargetShape));
+
+ arm_compute::ITensorInfo* addOutputInfo = GpuReshape::create_op(*sketch, inputTensorInfos[0], attributes);
+
+ // Temporary fix until fusing attempt is made for GpuFsa backend and outputLayer workoad is created
+ outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+ GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
+
+ // Store the tensorInfos within the blob as std::unique_ptr<> so they can be used later
+ blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+ blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
+
diff --git a/src/backends/gpuFsa/layers/GpuFsaReshape.hpp b/src/backends/gpuFsa/layers/GpuFsaReshape.hpp
new file mode 100644
index 0000000000..16fa1f975e
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaReshape.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaReshapeValidate(const TensorInfo& input, const ReshapeDescriptor& descriptor);
+
+void GpuFsaReshapeCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input,
+ const ReshapeDescriptor& descriptor);
+
+} // namespace armnn
+
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index 06b2a71dee..329929115a 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -12,6 +12,7 @@
#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
#include "backendsCommon/test/ResizeEndToEndTestImpl.hpp"
#include "backendsCommon/test/SoftmaxEndToEndTestImpl.hpp"
@@ -125,15 +126,8 @@ TEST_CASE("GpuFsaAvgPool2DEndtoEndTestFloat16")
TEST_CASE("UNSUPPORTED_GpuFsaAvgPool2DIgnoreValueEndtoEndTestFloat32")
{
// Exclude padding must be set to true in Attributes! to be supported by GPU
- try
- {
- AvgPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue);
- FAIL("An exception should have been thrown");
- }
- catch (const armnn::InvalidArgumentException& e)
- {
- CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
- }
+ CHECK_THROWS_AS(AvgPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue),
+ armnn::InvalidArgumentException);
}
// Max Pool 2D
@@ -150,15 +144,40 @@ TEST_CASE("GpuFsaMaxPool2DEndtoEndTestFloat16")
TEST_CASE("UNSUPPORTED_GpuFsaMaxPool2DIgnoreValueEndtoEndTestFloat32")
{
// Exclude padding must be set to true in Attributes! to be supported by GPU
- try
- {
- MaxPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue);
- FAIL("An exception should have been thrown");
- }
- catch (const armnn::InvalidArgumentException& e)
- {
- CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
- }
+ CHECK_THROWS_AS(MaxPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue),
+ armnn::InvalidArgumentException);
+}
+
+// Reshape
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestFloat32")
+{
+ CHECK_THROWS_AS(ReshapeEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestFloat16")
+{
+ CHECK_THROWS_AS(ReshapeEndToEndFloat16<armnn::DataType::Float16>(gpuFsaDefaultBackends),
+ armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestInt32")
+{
+ CHECK_THROWS_AS(ReshapeEndToEnd<armnn::DataType::Signed32>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestInt16")
+{
+ CHECK_THROWS_AS(ReshapeEndToEnd<armnn::DataType::QSymmS16>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestUInt8")
+{
+ CHECK_THROWS_AS(ReshapeEndToEnd<armnn::DataType::QAsymmU8>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestInt8")
+{
+ CHECK_THROWS_AS(ReshapeEndToEnd<armnn::DataType::QAsymmS8>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
}
// Resize Bilinear
@@ -187,15 +206,7 @@ TEST_CASE("GpuFsaResizeNearestNeighborEndToEndFloatHalfPixelNhwcTest")
TEST_CASE("UNSUPPORTED_GpuFsaSoftmaxTestFloat32")
{
- try
- {
- SoftmaxEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends);
- FAIL("An exception should have been thrown");
- }
- catch (const armnn::InvalidArgumentException& e)
- {
- CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
- }
+ CHECK_THROWS_AS(SoftmaxEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
}
}
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index cf465c28ff..dce98b389f 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -176,6 +176,63 @@ TEST_CASE("IsLayerSupportedGpuFsaPooling2d")
CHECK(supported);
}
+TEST_CASE("UNSUPPORTED_IsLayerSupportedGpuFsaReshape")
+{
+ TensorInfo inputInfo{};
+ TensorInfo outputInfo{};
+
+ SUBCASE("Float32")
+ {
+ inputInfo = { { 2, 3 }, DataType::Float32 };
+ outputInfo = { { 6 } , DataType::Float32 };
+ }
+
+ SUBCASE("Float16")
+ {
+ inputInfo = { { 2, 3 }, DataType::Float16 };
+ outputInfo = { { 6 } , DataType::Float16 };
+ }
+
+ SUBCASE("Int32")
+ {
+ inputInfo = { { 2, 3 }, DataType::Signed32 };
+ outputInfo = { { 6 } , DataType::Signed32 };
+ }
+
+ SUBCASE("Int16")
+ {
+ inputInfo = { { 2, 3 }, DataType::QSymmS16 };
+ outputInfo = { { 6 } , DataType::QSymmS16 };
+ }
+
+ SUBCASE("UInt8")
+ {
+ inputInfo = { { 2, 3 }, DataType::QAsymmU8 };
+ outputInfo = { { 6 } , DataType::QAsymmU8 };
+ }
+
+ SUBCASE("Int8")
+ {
+ inputInfo = { { 2, 3 }, DataType::QAsymmS8 };
+ outputInfo = { { 6 } , DataType::QAsymmS8 };
+ }
+
+ ReshapeDescriptor desc;
+ desc.m_TargetShape = outputInfo.GetShape();
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+
+ auto supported = supportChecker.IsLayerSupported(LayerType::Reshape,
+ { inputInfo, outputInfo },
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+
+ CHECK(!supported);
+}
+
TEST_CASE("IsLayerSupportedGpuFsaResize")
{
TensorInfo inputInfo({ 1, 5, 5, 1 }, DataType::Float32);
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index f505b7fd46..6634ac6673 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -773,6 +773,26 @@ TEST_CASE("NeonReshapeEndToEndTestFloat16")
ReshapeEndToEndFloat16<armnn::DataType::Float16>(neonDefaultBackends);
}
+TEST_CASE("NeonReshapeEndToEndTestInt32")
+{
+ ReshapeEndToEnd<armnn::DataType::Signed32>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonReshapeEndToEndTestInt16")
+{
+ ReshapeEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonReshapeEndToEndTestUInt8")
+{
+ ReshapeEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonReshapeEndToEndTestInt8")
+{
+ ReshapeEndToEnd<armnn::DataType::QAsymmS8>(neonDefaultBackends);
+}
+
// Resize Bilinear
TEST_CASE("NeonResizeBilinearEndToEndFloatNchwTest")
{
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 866cff8ca3..4af54852fc 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -1886,6 +1886,27 @@ TEST_CASE("RefReshapeEndToEndTestFloat16")
ReshapeEndToEndFloat16<armnn::DataType::Float16>(defaultBackends);
}
+TEST_CASE("RefReshapeEndToEndTestInt32")
+{
+ ReshapeEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+}
+
+TEST_CASE("RefReshapeEndToEndTestInt16")
+{
+ ReshapeEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
+}
+
+TEST_CASE("RefReshapeEndToEndTestUInt8")
+{
+ ReshapeEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+}
+
+TEST_CASE("RefReshapeEndToEndTestInt8")
+{
+ ReshapeEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+// Force Import
TEST_CASE("RefForceImportWithAlignedBuffersEndToEndTest")
{
ForceImportWithAlignedBuffersEndToEndTest(defaultBackends);