blob: 6af89c1f014f429f8cc91c4016a589c1f7847880 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
|
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <backends/neon/NeonLayerSupport.hpp>
#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
#include <backends/Workload.hpp>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <boost/optional.hpp>
#include <memory>
namespace armnn
{
/// Validates that the given tensor shapes/data types and descriptor form a
/// Convolution2d configuration that the Arm Compute NEON backend can execute,
/// without actually configuring a layer.
/// @param input      TensorInfo describing the input feature map.
/// @param output     TensorInfo describing the expected output feature map.
/// @param descriptor Convolution parameters (stride, padding, etc.).
/// @param weights    TensorInfo describing the convolution kernel tensor.
/// @param biases     Optional bias TensorInfo; presumably only meaningful when
///                   the descriptor enables bias — confirm against callers.
/// @return arm_compute::Status — OK on success, otherwise an error status
///         describing why the configuration is unsupported.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const boost::optional<TensorInfo>& biases);
/// Common base for NEON Convolution2d workloads, templated on the supported
/// armnn::DataType values. Owns the Arm Compute function object plus the
/// kernel/bias tensors, and leaves construction of the concrete convolution
/// function to the constructor (defined elsewhere).
template<armnn::DataType... dataTypes>
class NeonConvolution2dBaseWorkload : public TypedWorkload<Convolution2dQueueDescriptor, dataTypes...>
{
public:
    // Bring the queue-descriptor member from the TypedWorkload base into scope.
using TypedWorkload<Convolution2dQueueDescriptor, dataTypes...>::m_Data;
    /// Constructs the workload. The memory manager is shared with other NEON
    /// workloads so Arm Compute can service intermediate allocations on demand.
NeonConvolution2dBaseWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
    /// Hook for derived classes to validate their data; no-op by default.
virtual void ValidateData() const {};
protected:
    // Configured Arm Compute convolution function (type chosen at runtime).
std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
    // ACL-side copies of the kernel and (optional) bias tensors; released via
    // FreeUnusedTensors() once no longer needed.
std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
std::unique_ptr<arm_compute::Tensor> m_BiasTensor;
    /// Frees m_KernelTensor/m_BiasTensor storage once the layer is configured —
    /// presumably called after ACL has imported the data; confirm in the .cpp.
void FreeUnusedTensors();
};
} //namespace armnn
|