ArmNN 22.02
ClConvolution2dWorkload.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/Descriptors.hpp>

#include "ClBaseWorkload.hpp"

#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <cl/ICLTensorProxy.hpp>

#include <memory>

namespace armnn
{

arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const Convolution2dDescriptor& descriptor,
                                                    const TensorInfo& weights,
                                                    const Optional<TensorInfo>& biases,
                                                    bool isFastMathEnabled = false,
                                                    const ActivationDescriptor* activationDescriptor = nullptr);

class ClConvolution2dWorkload : public ClBaseWorkload<Convolution2dQueueDescriptor>
{
public:
    ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
                            const WorkloadInfo& info,
                            std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
                            const arm_compute::CLCompileContext& clCompileContext,
                            const bool isFastMathEnabled = false);
    void Execute() const override;

    arm_compute::ConvolutionMethod GetConvolutionMethod() const;

    bool SupportsTensorHandleReplacement() const override { return true; }

protected:
    void Reconfigure() override;

private:
    mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;

    std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
    std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;

    arm_compute::ConvolutionMethod m_ConvolutionMethod;

    void FreeUnusedTensors();

    std::unique_ptr<ICLTensorProxy> m_InputProxy;
    std::unique_ptr<ICLTensorProxy> m_OutputProxy;
};

} // namespace armnn
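As a usage illustration (not part of the header above), the sketch below shows how the validate helper is typically called before a workload is created: it queries the Compute Library and reports whether the CL backend accepts the given convolution configuration. The wrapper name CheckClConvolution2dSupported, the include paths, and the explicit error-code check are assumptions following the common ArmNN backend pattern, not code taken from this file.

// Sketch only: checks whether the CL backend can execute a given
// Convolution2d configuration. The helper name and include paths are
// illustrative assumptions.
#include <arm_compute/core/Error.h>

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include "ClConvolution2dWorkload.hpp"

namespace armnn
{

bool CheckClConvolution2dSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const Convolution2dDescriptor& descriptor,
                                   const TensorInfo& weights,
                                   const Optional<TensorInfo>& biases)
{
    // The validate function returns an arm_compute::Status;
    // ErrorCode::OK means the Compute Library accepts this configuration.
    arm_compute::Status status = ClConvolution2dWorkloadValidate(input,
                                                                 output,
                                                                 descriptor,
                                                                 weights,
                                                                 biases,
                                                                 /*isFastMathEnabled=*/false,
                                                                 /*activationDescriptor=*/nullptr);
    return status.error_code() == arm_compute::ErrorCode::OK;
}

} // namespace armnn

This mirrors how ArmNN's CL backend layer-support checks are generally structured, with the support query forwarded to the workload's validate function rather than constructing the workload speculatively.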