ArmNN 20.02
NeonConvolution2dWorkload.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <backendsCommon/Workload.h>

#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>

namespace armnn
{

arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const Convolution2dDescriptor& descriptor,
                                                      const TensorInfo& weights,
                                                      const Optional<TensorInfo>& biases);

class NeonConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
{
public:
    using BaseWorkload<Convolution2dQueueDescriptor>::m_Data;

    NeonConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
                              std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);

    void Execute() const override;

private:
    std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;

    std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
    std::unique_ptr<arm_compute::Tensor> m_BiasTensor;

    void FreeUnusedTensors();

};

} //namespace armnn
Symbols referenced in this file:

Status: enumeration (Definition: Types.hpp:26)
NeonConvolution2dWorkloadValidate: arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input, const TensorInfo& output, const Convolution2dDescriptor& descriptor, const TensorInfo& weights, const Optional<TensorInfo>& biases)
NeonConvolution2dWorkload: NeonConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
WorkloadInfo: Contains information about inputs and outputs to a layer.
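The header above only declares the workload and its validation hook. As a rough illustration of how the validation entry point can be called, the sketch below asks the Neon backend whether it accepts a specific float32 convolution before any workload is built. The tensor shapes, descriptor settings, helper name IsConv2dSupportedOnNeon and the include path used for this header are illustrative assumptions, not something defined in the file above.

// Minimal sketch, assuming this header and the ArmNN / Compute Library headers
// are on the include path; shapes, strides and the helper name are illustrative.
#include <neon/workloads/NeonConvolution2dWorkload.hpp> // include path is an assumption
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <arm_compute/core/Error.h>

// Hypothetical helper, not part of ArmNN.
bool IsConv2dSupportedOnNeon()
{
    using namespace armnn;

    // NCHW float32 tensors chosen purely for illustration:
    // 1x3x224x224 input, sixteen 3x3 kernels, no padding, stride 1.
    TensorInfo input(TensorShape({1, 3, 224, 224}), DataType::Float32);
    TensorInfo weights(TensorShape({16, 3, 3, 3}), DataType::Float32);
    TensorInfo biases(TensorShape({16}), DataType::Float32);
    TensorInfo output(TensorShape({1, 16, 222, 222}), DataType::Float32);

    Convolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = DataLayout::NCHW;

    // The validate function returns an arm_compute::Status;
    // ErrorCode::OK means the Neon backend can run this configuration.
    arm_compute::Status status = NeonConvolution2dWorkloadValidate(
        input, output, descriptor, weights, Optional<TensorInfo>(biases));

    return status.error_code() == arm_compute::ErrorCode::OK;
}

In normal use the workload itself is not constructed by hand: the Neon workload factory creates it from a Convolution2dQueueDescriptor (which carries the input and output tensor handles plus the weight and bias data) and a WorkloadInfo describing those tensors, and the runtime then calls Execute() to run the underlying arm_compute::IFunction. FreeUnusedTensors() lets the workload release m_KernelTensor and m_BiasTensor once they are no longer needed.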