ArmNN 21.02 — NeonConvolution2dWorkload.hpp (generated documentation listing). Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
9 
10 #include <arm_compute/runtime/IFunction.h>
11 #include <arm_compute/runtime/Tensor.h>
12 #include <arm_compute/runtime/MemoryManagerOnDemand.h>
13 
14 #include <memory>
15 
16 namespace armnn
17 {
18 
20  const TensorInfo& output,
21  const Convolution2dDescriptor& descriptor,
22  const TensorInfo& weights,
23  const Optional<TensorInfo>& biases,
24  bool isFastMathEnabled = false,
25  const ActivationDescriptor* activationDescriptor = nullptr);
26 
27 class NeonConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
28 {
29 public:
31 
33  const WorkloadInfo& info,
34  std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
35  const bool isFastMathENabled = false);
36 
37  void Execute() const override;
38 
39  arm_compute::ConvolutionMethod GetConvolutionMethod() const;
40 
41 private:
42  std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
43 
44  std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
45  std::unique_ptr<arm_compute::Tensor> m_BiasTensor;
46 
47  arm_compute::ConvolutionMethod m_ConvolutionMethod;
48 
49  void FreeUnusedTensors();
50 
51 };
52 
53 } //namespace armnn
NeonConvolution2dWorkload(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager, const bool isFastMathENabled=false)
Copyright (c) 2021 ARM Limited and Contributors.
Status: enumeration — definition at Types.hpp:26.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
arm_compute::ConvolutionMethod GetConvolutionMethod() const
WorkloadInfo — contains information about inputs and outputs to a layer.