ArmNN 20.05
ClConvolution2dWorkload.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/Descriptors.hpp>

#include <backendsCommon/Workload.hpp>

#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>

namespace armnn
{

// Validates whether the CL backend can execute the given convolution configuration.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const Convolution2dDescriptor& descriptor,
                                                    const TensorInfo& weights,
                                                    const Optional<TensorInfo>& biases);

class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
{
public:
    ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
                            std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
    void Execute() const override;

private:
    // mutable so the underlying ACL layer can be run from the const Execute().
    mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;

    std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
    std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;

    // Releases m_KernelTensor and m_BiasTensor once the layer no longer needs them.
    void FreeUnusedTensors();
};

} //namespace armnn
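For context, the sketch below shows one way ClConvolution2dWorkloadValidate might be called to pre-check a convolution configuration before a CL workload is created. It is a minimal illustration only: the tensor shapes, data layout and descriptor values are assumptions chosen for the example, and it is expected to build only inside the ArmNN source tree, where this internal header and the Arm Compute Library headers are on the include path.

// Minimal sketch: ask the CL backend whether a 3x3, stride-1 convolution is supported.
// Shapes and descriptor values below are illustrative assumptions, not taken from this header.
#include "ClConvolution2dWorkload.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>

#include <iostream>

bool IsExampleConvSupported()
{
    using namespace armnn;

    // NCHW Float32 tensors: 1 batch, 3 input channels, 16 output channels, 224x224 spatial size.
    TensorInfo input  (TensorShape({1, 3, 224, 224}),  DataType::Float32);
    TensorInfo output (TensorShape({1, 16, 224, 224}), DataType::Float32);
    TensorInfo weights(TensorShape({16, 3, 3, 3}),     DataType::Float32);

    Convolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_PadLeft     = 1;
    descriptor.m_PadRight    = 1;
    descriptor.m_PadTop      = 1;
    descriptor.m_PadBottom   = 1;
    descriptor.m_BiasEnabled = false;
    descriptor.m_DataLayout  = DataLayout::NCHW;

    // No bias in this example, so an empty Optional<TensorInfo> is passed.
    Optional<TensorInfo> noBias;
    arm_compute::Status status = ClConvolution2dWorkloadValidate(input,
                                                                 output,
                                                                 descriptor,
                                                                 weights,
                                                                 noBias);

    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        std::cerr << "CL convolution not supported: " << status.error_description() << std::endl;
        return false;
    }
    return true;
}

The same validate call is what the CL layer-support checks rely on, so probing it up front avoids constructing a ClConvolution2dWorkload for a configuration the backend would reject.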