ArmNN 20.08
NeonFullyConnectedWorkload.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <backendsCommon/Workload.hpp>

#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <arm_compute/runtime/Tensor.h>

#include <memory>
namespace armnn
{

arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& weights,
                                                       const TensorInfo& biases,
                                                       const FullyConnectedDescriptor& descriptor);

class NeonFullyConnectedWorkload : public BaseWorkload<FullyConnectedQueueDescriptor>
{
public:
    NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info,
                               std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
    virtual void Execute() const override;

private:
    std::unique_ptr<arm_compute::IFunction> m_FullyConnectedLayer;

    std::unique_ptr<arm_compute::Tensor> m_WeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_BiasesTensor;

    void FreeUnusedTensors();
};

} //namespace armnn
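For context, a minimal sketch of how this header is typically driven: NeonFullyConnectedWorkloadValidate is queried first to check whether the Arm Compute Library supports the configuration, and only then is the workload constructed (and later executed via Execute()). The helper function MakeNeonFullyConnectedWorkload below, and the way the tensor infos are pulled out of the queue descriptor and WorkloadInfo, are illustrative assumptions and not part of this file.

// Sketch only: MakeNeonFullyConnectedWorkload is a hypothetical helper, not part of
// NeonFullyConnectedWorkload.hpp; it assumes the bias is enabled so m_Bias is valid.
#include "NeonFullyConnectedWorkload.hpp"

#include <memory>

namespace armnn
{

std::unique_ptr<NeonFullyConnectedWorkload> MakeNeonFullyConnectedWorkload(
    const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
{
    // Ask the Arm Compute Library whether it can handle this configuration.
    arm_compute::Status aclStatus = NeonFullyConnectedWorkloadValidate(
        info.m_InputTensorInfos[0],            // input tensor shape/type
        info.m_OutputTensorInfos[0],           // output tensor shape/type
        descriptor.m_Weight->GetTensorInfo(),  // weights
        descriptor.m_Bias->GetTensorInfo(),    // biases (assumes m_BiasEnabled)
        descriptor.m_Parameters);              // FullyConnectedDescriptor

    if (aclStatus.error_code() != arm_compute::ErrorCode::OK)
    {
        return nullptr;  // configuration not supported on the NEON backend
    }

    // Construction configures the underlying arm_compute::IFunction;
    // calling Execute() on the returned workload runs it.
    return std::make_unique<NeonFullyConnectedWorkload>(descriptor, info, memoryManager);
}

} // namespace armnn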