diff options
-rw-r--r-- | src/backends/aclCommon/ArmComputeTensorUtils.cpp | 6 | ||||
-rw-r--r-- | src/backends/aclCommon/ArmComputeTensorUtils.hpp | 3 | ||||
-rw-r--r-- | src/backends/aclCommon/ArmComputeUtils.hpp | 36 | ||||
-rw-r--r-- | src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp | 7 | ||||
-rw-r--r-- | src/backends/neon/NeonLayerSupport.cpp | 34 | ||||
-rw-r--r-- | src/backends/neon/NeonLayerSupport.hpp | 7 | ||||
-rw-r--r-- | src/backends/neon/NeonWorkloadFactory.cpp | 21 | ||||
-rw-r--r-- | src/backends/neon/NeonWorkloadFactory.hpp | 3 | ||||
-rw-r--r-- | src/backends/neon/backend.mk | 1 | ||||
-rw-r--r-- | src/backends/neon/test/NeonLayerTests.cpp | 55 | ||||
-rw-r--r-- | src/backends/neon/workloads/CMakeLists.txt | 2 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonConvolution3dWorkload.cpp | 116 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonConvolution3dWorkload.hpp | 43 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonWorkloads.hpp | 1 |
14 files changed, 331 insertions, 4 deletions
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp index 06309319f3..62f3263a0c 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include <aclCommon/ArmComputeTensorUtils.hpp> @@ -143,6 +143,10 @@ arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout) case armnn::DataLayout::NCHW : return arm_compute::DataLayout::NCHW; + case armnn::DataLayout::NDHWC : return arm_compute::DataLayout::NDHWC; + + case armnn::DataLayout::NCDHW : return arm_compute::DataLayout::NCDHW; + default: throw InvalidArgumentException("Unknown armnn::DataLayout: [" + std::to_string(static_cast<int>(dataLayout)) + "]"); } diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp index 011f44dc69..ad5d4614fe 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #pragma once @@ -12,7 +12,6 @@ #include <arm_compute/core/ITensor.h> #include <arm_compute/core/TensorInfo.h> #include <arm_compute/core/Types.h> -#include <arm_compute/core/Size2D.h> #include <Half.hpp> diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp index 059518acd6..2f767891a1 100644 --- a/src/backends/aclCommon/ArmComputeUtils.hpp +++ b/src/backends/aclCommon/ArmComputeUtils.hpp @@ -11,6 +11,7 @@ #include <backendsCommon/WorkloadData.hpp> #include <arm_compute/core/Types.h> +#include <arm_compute/runtime/FunctionDescriptors.h> #if defined(ARMCOMPUTENEON_ENABLED) #include "neon/workloads/NeonReduceWorkload.hpp" @@ -264,6 +265,41 @@ inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo return static_cast<unsigned int>(positiveAxis); } +/// Utility function used to setup an arm_compute::Conv3dInfo object from convolution3d descriptor. +inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dDescriptor descriptor, + bool isFastMathEnabled, + const ActivationDescriptor* activationDescriptor) +{ + const arm_compute::Size3D stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ}; + const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight, + descriptor.m_PadTop, descriptor.m_PadBottom, + descriptor.m_PadFront, descriptor.m_PadBack}; + const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ}; + + const arm_compute::ActivationLayerInfo activationInfo = + ConvertActivationDescriptorToAclActivationLayerInfo(activationDescriptor); + const auto roundType = arm_compute::DimensionRoundingType::FLOOR; + + return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled}; +} + +inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueueDescriptor queueDescriptor, + bool isFastMathEnabled) +{ + auto 
descriptor = queueDescriptor.m_Parameters; + const arm_compute::Size3D stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ}; + const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight, + descriptor.m_PadTop, descriptor.m_PadBottom, + descriptor.m_PadFront, descriptor.m_PadBack}; + const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ}; + + const arm_compute::ActivationLayerInfo activationInfo = + ConvertAdditionalInfoToAclActivationLayerInfo(queueDescriptor); + const auto roundType = arm_compute::DimensionRoundingType::FLOOR; + + return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled}; +} + inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor) { switch (descriptor.m_ReduceOperation) diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp index 1406ab039b..a592ea3f31 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp @@ -89,6 +89,11 @@ void SetScaleOffset(float& qScale, int32_t& qOffset) break; } case armnn::DataType::QAsymmS8: + { + qScale = 0.1f; + qOffset = 64; + break; + } case armnn::DataType::QSymmS16: { qScale = 0.1f; @@ -100,7 +105,7 @@ void SetScaleOffset(float& qScale, int32_t& qOffset) case armnn::DataType::Float32: default: { - qScale = 0.f; + qScale = 1.f; qOffset = 0; break; } diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index d736557f2a..d5dd238bd8 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -33,6 +33,7 @@ #include "workloads/NeonConcatWorkload.hpp" #include "workloads/NeonConstantWorkload.hpp" #include "workloads/NeonConvolution2dWorkload.hpp" +#include 
"workloads/NeonConvolution3dWorkload.hpp" #include "workloads/NeonDepthToSpaceWorkload.hpp" #include "workloads/NeonDepthwiseConvolutionWorkload.hpp" #include "workloads/NeonDequantizeWorkload.hpp" @@ -373,6 +374,39 @@ bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input, nullptr); } +bool NeonLayerSupport::IsConvolution3dSupported(const TensorInfo& input, + const TensorInfo& output, + const Convolution3dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + Optional<std::string&> reasonIfUnsupported) const +{ + bool isFastMathEnabled = false; +#if defined(ARMCOMPUTENEON_ENABLED) + if (m_ModelContextPtr) + { + if (m_ModelContextPtr.get() != nullptr) + { + auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get()); + if (modelOptions) + { + isFastMathEnabled = modelOptions->IsFastMathEnabled(); + } + } + } +#endif + + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution3dWorkloadValidate, + reasonIfUnsupported, + input, + output, + descriptor, + weights, + biases, + isFastMathEnabled, + nullptr); +} + bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input, const TensorInfo& output, const DepthToSpaceDescriptor& descriptor, diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index 155d96acdc..16507c595e 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -94,6 +94,13 @@ public: const Optional<TensorInfo>& biases, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsConvolution3dSupported(const TensorInfo& input, + const TensorInfo& output, + const Convolution3dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsDepthToSpaceSupported(const TensorInfo& input, const TensorInfo& output, const DepthToSpaceDescriptor& descriptor, diff 
--git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 605b03d6b7..4c9397b950 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -240,6 +240,27 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d( isFastMathEnabled); } +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution3d( + const Convolution3dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + bool isFastMathEnabled = false; + if (m_ModelContextPtr) + { + if (m_ModelContextPtr.get() != nullptr) + { + auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get()); + if (modelOptions) + { + isFastMathEnabled = modelOptions->IsFastMathEnabled(); + } + } + } + return std::make_unique<NeonConvolution3dWorkload>(descriptor, + info, + m_MemoryManager->GetIntraLayerManager(), + isFastMathEnabled); +} + std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor, const WorkloadInfo& info) const { diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index bd84c057f5..f44681a5a8 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ -97,6 +97,9 @@ public: std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk index 9869af0615..daf2e5eb09 100644 --- a/src/backends/neon/backend.mk +++ b/src/backends/neon/backend.mk @@ -38,6 +38,7 @@ BACKEND_SOURCES := \ workloads/NeonConvertFp16ToFp32Workload.cpp \ 
workloads/NeonConvertFp32ToFp16Workload.cpp \ workloads/NeonConvolution2dWorkload.cpp \ + workloads/NeonConvolution3dWorkload.cpp \ workloads/NeonDepthToSpaceWorkload.cpp \ workloads/NeonDepthwiseConvolutionWorkload.cpp \ workloads/NeonDequantizeWorkload.cpp \ diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp index 7f32236978..4c58488d4e 100644 --- a/src/backends/neon/test/NeonLayerTests.cpp +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -164,6 +164,61 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2, ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC); +// Convolution 3d - NDHWC +ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, + SimpleConvolution3d3x3x3Float32Test, + false, + DataLayout::NDHWC) +ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8, + SimpleConvolution3d3x3x3Int8Test, + false, + DataLayout::NDHWC) +ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8, + SimpleConvolution3d3x3x3Uint8Test, + false, + DataLayout::NDHWC) + +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32, + Convolution3d2x2x2Strides3x5x5Float32Test, + false, + DataLayout::NDHWC) +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8, + Convolution3d2x2x2Strides3x5x5Int8Test, + true, + DataLayout::NDHWC) +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8, + Convolution3d2x2x2Strides3x5x5Uint8Test, + false, + DataLayout::NDHWC) + +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32, + Convolution3dPaddingSame3x3x3Float32Test, + false, + DataLayout::NDHWC) +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8, + Convolution3dPaddingSame3x3x3Int8Test, + false, + DataLayout::NDHWC) 
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8, + Convolution3dPaddingSame3x3x3Uint8Test, + false, + DataLayout::NDHWC) + +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Stride3x3x3SmallTestFloat32, + Convolution3d2x2x2Stride3x3x3SmallFloat32Test, + false, + DataLayout::NDHWC) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16, + Convolution3d2x3x3Float16Test, + true, + DataLayout::NDHWC) +ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16, + Convolution3d2x2x2SmallFloat16Test, + false, + DataLayout::NDHWC) +#endif + // DepthToSpace ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NCHW); diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt index 6451f4c5d7..0c64a19bf9 100644 --- a/src/backends/neon/workloads/CMakeLists.txt +++ b/src/backends/neon/workloads/CMakeLists.txt @@ -36,6 +36,8 @@ list(APPEND armnnNeonBackendWorkloads_sources NeonConvertFp32ToFp16Workload.hpp NeonConvolution2dWorkload.cpp NeonConvolution2dWorkload.hpp + NeonConvolution3dWorkload.cpp + NeonConvolution3dWorkload.hpp NeonDepthToSpaceWorkload.cpp NeonDepthToSpaceWorkload.hpp NeonDepthwiseConvolutionWorkload.cpp diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp new file mode 100644 index 0000000000..1891981162 --- /dev/null +++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp @@ -0,0 +1,116 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "NeonConvolution3dWorkload.hpp" + +#include <aclCommon/ArmComputeTensorUtils.hpp> +#include <aclCommon/ArmComputeUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> +#include <backendsCommon/TensorHandle.hpp> +#include <neon/workloads/NeonWorkloadUtils.hpp> + +#include <arm_compute/runtime/NEON/functions/NEConv3D.h> + +#include <armnn/Types.hpp> +#include <Half.hpp> + +namespace armnn +{ + +using namespace armcomputetensorutils; + +arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const Convolution3dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + bool isFastMathEnabled, + const ActivationDescriptor* activationDescriptor) +{ + const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout); + const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout); + arm_compute::TensorInfo aclBiasesInfo; + arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr; + if (descriptor.m_BiasEnabled) + { + ARMNN_ASSERT(biases.has_value()); + + aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout); + optionalAclBiasesInfo = &aclBiasesInfo; + } + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout); + + const arm_compute::Conv3dInfo aclConv3DInfo = ComputeConv3DInfo(descriptor, + isFastMathEnabled, + activationDescriptor); + + return arm_compute::NEConv3D::validate(&aclInputInfo, + &aclWeightsInfo, + optionalAclBiasesInfo, + &aclOutputInfo, + aclConv3DInfo); +} + +NeonConvolution3dWorkload::NeonConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor, + const WorkloadInfo& info, + std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager, + const bool isFastMathEnabled) + : BaseWorkload<Convolution3dQueueDescriptor>(descriptor, info) +{ + 
IgnoreUnused(memoryManager); + + using arm_compute::NEConv3D; + uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3: 2; + m_Data.ValidateInputsOutputs("NeonConvolution3dWorkload", numInputs, 1); + + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor* biasesPtr = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + biasesPtr = &PolymorphicDowncast<IAclTensorHandle *>(m_Data.m_Inputs[2])->GetTensor(); + } + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); + input.info()->set_data_layout(aclDataLayout); + weights.info()->set_data_layout(aclDataLayout); + output.info()->set_data_layout(aclDataLayout); + + const arm_compute::Conv3dInfo aclConv3DInfo = ComputeConv3DInfo(descriptor, isFastMathEnabled); + + auto convolutionLayer = std::make_unique<arm_compute::NEConv3D>(); + convolutionLayer->configure(&input, + &weights, + biasesPtr, + &output, + aclConv3DInfo); + + // Add details for profiling output + WorkloadInfo detailsInfo; + + detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos; + detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos; + + // Report Profiling Details + ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonConvolution3dWorkload_Construct", + descriptor.m_Parameters, + detailsInfo, + this->GetGuid()); + + m_ConvolutionLayer.reset(convolutionLayer.release()); + + ARMNN_ASSERT(m_ConvolutionLayer); + + m_ConvolutionLayer->prepare(); +} + +void NeonConvolution3dWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution3dWorkload_Execute", this->GetGuid()); + m_ConvolutionLayer->run(); +} + +} //namespace armnn diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.hpp 
b/src/backends/neon/workloads/NeonConvolution3dWorkload.hpp new file mode 100644 index 0000000000..b5175e963f --- /dev/null +++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.hpp @@ -0,0 +1,43 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <backendsCommon/Workload.hpp> + +#include <arm_compute/runtime/IFunction.h> +#include <arm_compute/runtime/Tensor.h> +#include <arm_compute/runtime/MemoryManagerOnDemand.h> + +#include <memory> + +namespace armnn +{ + +arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const Convolution3dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + bool isFastMathEnabled = false, + const ActivationDescriptor* activationDescriptor = nullptr); + +class NeonConvolution3dWorkload : public BaseWorkload<Convolution3dQueueDescriptor> +{ +public: + using BaseWorkload<Convolution3dQueueDescriptor>::m_Data; + + NeonConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor, + const WorkloadInfo& info, + std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager, + const bool isFastMathEnabled = false); + + void Execute() const override; + +private: + std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer; +}; + +} //namespace armnn diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp index 4d51d186e9..a8134a130b 100644 --- a/src/backends/neon/workloads/NeonWorkloads.hpp +++ b/src/backends/neon/workloads/NeonWorkloads.hpp @@ -20,6 +20,7 @@ #include "NeonConvertFp32ToBf16Workload.hpp" #include "NeonConvertFp32ToFp16Workload.hpp" #include "NeonConvolution2dWorkload.hpp" +#include "NeonConvolution3dWorkload.hpp" #include "NeonDepthToSpaceWorkload.hpp" #include "NeonDepthwiseConvolutionWorkload.hpp" #include "NeonDequantizeWorkload.hpp" |