ArmNN 20.08 — source listing of NeonTransposeConvolution2dWorkload.hpp
(Go to the documentation of this file.)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
6 #pragma once
7 
9 
#include <backendsCommon/Workload.hpp>

#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <arm_compute/runtime/Tensor.h>

#include <arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h>

#include <memory>
17 
18 namespace armnn
19 {
20 
22  const TensorInfo& output,
23  const TransposeConvolution2dDescriptor& descriptor,
24  const TensorInfo& weights,
25  const Optional<TensorInfo>& biases);
26 
27 class NeonTransposeConvolution2dWorkload : public BaseWorkload<TransposeConvolution2dQueueDescriptor>
28 {
29 public:
31  const WorkloadInfo& info,
32  std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
33 
34  void Execute() const override;
35 
36 private:
37  std::unique_ptr<arm_compute::NEDeconvolutionLayer> m_Layer;
38 
39  std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
40  std::unique_ptr<arm_compute::Tensor> m_BiasTensor;
41 
42  void FreeUnusedTensors();
43 };
44 
45 } //namespace armnn
Referenced symbols (from the documentation page):
- arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo& input, const TensorInfo& output, const TransposeConvolution2dDescriptor& descriptor, const TensorInfo& weights, const Optional<TensorInfo>& biases)
- NeonTransposeConvolution2dWorkload(const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- Status: enumeration, defined in Types.hpp:26
- WorkloadInfo: contains information about inputs and outputs to a layer.
Copyright (c) 2020 ARM Limited.