Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp              | 10
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp              |  1
-rw-r--r--  src/backends/neon/workloads/NeonReshapeWorkload.cpp |  9
-rw-r--r--  src/backends/neon/workloads/NeonReshapeWorkload.hpp |  5
4 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 80695fb3c7..b2b165ec4e 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -40,6 +40,7 @@
#include "workloads/NeonPreluWorkload.hpp"
#include "workloads/NeonQuantizeWorkload.hpp"
#include "workloads/NeonQuantizedLstmWorkload.hpp"
+#include "workloads/NeonReshapeWorkload.hpp"
#include "workloads/NeonResizeWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSpaceToDepthWorkload.hpp"
@@ -499,14 +500,15 @@ bool NeonLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
}
bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
+ const TensorInfo& output,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(descriptor);
- return IsSupportedForDataTypeNeon(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
}
bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
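
Note: the FORWARD_WORKLOAD_VALIDATE_FUNC macro used above is not defined in this patch. The sketch below is only an assumed approximation of the kind of helper such a macro forwards to, converting the arm_compute::Status returned by NeonReshapeWorkloadValidate into the bool/reasonIfUnsupported pair that IsReshapeSupported returns; the helper name IsWorkloadSupportedSketch is hypothetical.

// Assumed sketch only; not the ArmNN macro definition.
#include <armnn/Optional.hpp>
#include <arm_compute/core/Error.h>

#include <string>
#include <utility>

template <typename FuncType, typename... Args>
bool IsWorkloadSupportedSketch(FuncType&& func,
                               armnn::Optional<std::string&> reasonIfUnsupported,
                               Args&&... args)
{
    // Run the Compute Library validate function with the forwarded arguments.
    const arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported.has_value())
    {
        // Surface the Compute Library's error text as the unsupported reason.
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}
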
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 7ba90e4c93..58181d375b 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -174,6 +174,7 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsReshapeSupported(const TensorInfo& input,
+ const TensorInfo& output,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 7f2056c8e2..659bb94723 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -14,6 +14,15 @@
namespace armnn
{
+arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ return arm_compute::NEReshapeLayer::validate(&aclInputInfo, &aclOutputInfo);
+}
+
NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info)
: BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.hpp b/src/backends/neon/workloads/NeonReshapeWorkload.hpp
index 2202463928..186a02ba26 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.hpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.hpp
@@ -6,7 +6,10 @@
#pragma once
#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+#include <armnn/TypesUtils.hpp>
#include <arm_compute/runtime/IFunction.h>
#include <memory>
@@ -14,6 +17,8 @@
namespace armnn
{
+arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
class NeonReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
{
public:
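
A usage sketch of the new validate hook follows; the tensor shapes, data type, include paths, and function name are illustrative assumptions rather than part of this change.

// Illustrative only: query Neon reshape support before creating the workload.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <neon/workloads/NeonReshapeWorkload.hpp>  // in-tree path, assumed

#include <iostream>

bool CanReshapeOnNeon()
{
    // 1x2x3x4 -> 1x4x6 keeps the element count (24), as a reshape must.
    const armnn::TensorInfo inputInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo({ 1, 4, 6 }, armnn::DataType::Float32);

    const arm_compute::Status status =
        armnn::NeonReshapeWorkloadValidate(inputInfo, outputInfo);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        std::cerr << "Reshape not supported on Neon: "
                  << status.error_description() << "\n";
        return false;
    }
    return true;
}

With this patch, NeonLayerSupport::IsReshapeSupported reports the same answer, forwarding the Compute Library's error description through reasonIfUnsupported.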