diff options
Diffstat (limited to 'src/backends/neon')
 src/backends/neon/NeonLayerSupport.cpp            |  8 ++--
 src/backends/neon/test/NeonLayerSupportTests.cpp  | 37 +++++++++++++
 src/backends/neon/workloads/NeonConstantWorkload.cpp | 42 ++++++++++++++
 src/backends/neon/workloads/NeonConstantWorkload.hpp |  1 +
4 files changed, 84 insertions, 4 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index 44e84fb974..5d59ab83aa 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -25,6 +25,7 @@ #include "workloads/NeonBatchNormalizationWorkload.hpp" #include "workloads/NeonBatchToSpaceNdWorkload.hpp" #include "workloads/NeonComparisonWorkload.hpp" +#include "workloads/NeonConstantWorkload.hpp" #include "workloads/NeonConvolution2dWorkload.hpp" #include "workloads/NeonDepthToSpaceWorkload.hpp" #include "workloads/NeonDepthwiseConvolutionWorkload.hpp" @@ -253,10 +254,9 @@ bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> in bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - return IsSupportedForDataTypeNeon(reasonIfUnsupported, - output.GetDataType(), - &TrueFunc<>, - &TrueFunc<>); + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate, + reasonIfUnsupported, + output); } bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input, diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp index 2d43125506..3b086ad28f 100644 --- a/src/backends/neon/test/NeonLayerSupportTests.cpp +++ b/src/backends/neon/test/NeonLayerSupportTests.cpp @@ -85,4 +85,41 @@ BOOST_AUTO_TEST_CASE(IsMeanSupportedNeon) BOOST_CHECK(result); } +BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon) +{ + std::string reasonIfUnsupported; + + bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::Float16>(reasonIfUnsupported); + BOOST_CHECK(result); + + result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::Float32>(reasonIfUnsupported); + BOOST_CHECK(result); + + result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::QAsymmU8>(reasonIfUnsupported); + BOOST_CHECK(result); + + result = 
IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::Boolean>(reasonIfUnsupported); + BOOST_CHECK(!result); + + result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::QSymmS16>(reasonIfUnsupported); + BOOST_CHECK(result); + + result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::QSymmS8>(reasonIfUnsupported); + BOOST_CHECK(result); + + result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::QAsymmS8>(reasonIfUnsupported); + BOOST_CHECK(result); + + result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory, + armnn::DataType::BFloat16>(reasonIfUnsupported); + BOOST_CHECK(result); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp index 1cffbe1448..f7c8a73f78 100644 --- a/src/backends/neon/workloads/NeonConstantWorkload.cpp +++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp @@ -19,6 +19,32 @@ namespace armnn { +arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output) +{ + const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output); + + std::array<arm_compute::DataType,8> supportedTypes = { + arm_compute::DataType::BFLOAT16, + arm_compute::DataType::F16, + arm_compute::DataType::F32, + arm_compute::DataType::QASYMM8, + arm_compute::DataType::QASYMM8_SIGNED, + arm_compute::DataType::QSYMM16, + arm_compute::DataType::QSYMM8, + arm_compute::DataType::QSYMM8_PER_CHANNEL + }; + auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type()); + + if (it != end(supportedTypes)) + { + return arm_compute::Status{}; + } + else + { + return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"}; + } +} + NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info) : 
BaseWorkload<ConstantQueueDescriptor>(descriptor, info) @@ -68,6 +94,22 @@ void NeonConstantWorkload::Execute() const CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output); break; } + case arm_compute::DataType::QASYMM8_SIGNED: + { + CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output); + break; + } + case arm_compute::DataType::QSYMM16: + { + CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output); + break; + } + case arm_compute::DataType::QSYMM8: + case arm_compute::DataType::QSYMM8_PER_CHANNEL: + { + CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output); + break; + } default: { ARMNN_ASSERT_MSG(false, "Unknown data type"); diff --git a/src/backends/neon/workloads/NeonConstantWorkload.hpp b/src/backends/neon/workloads/NeonConstantWorkload.hpp index 18c1547930..f800a45256 100644 --- a/src/backends/neon/workloads/NeonConstantWorkload.hpp +++ b/src/backends/neon/workloads/NeonConstantWorkload.hpp @@ -9,6 +9,7 @@ namespace armnn { +arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output); class NeonConstantWorkload : public BaseWorkload<ConstantQueueDescriptor> { |