ArmNN
 22.08
EthosnRefWorkloadFactory.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#include <Layer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include "EthosnRefBackendId.hpp"

#include <memory>
#include <string>
#include <utility>
14 
15 
16 namespace armnn
17 {
18 
19 namespace
20 {
21 static const BackendId s_Id{EthosnRefBackendId()};
22 }
23 
24 EthosnRefWorkloadFactory::EthosnRefWorkloadFactory(const std::shared_ptr<EthosnRefMemoryManager>& memoryManager, bool winograd, std::string BlockConfigs)
25  : m_MemoryManager(memoryManager)
26 {
27  m_EnableWinograd = winograd;
28  m_BlockConfigs = BlockConfigs;
29 }
30 
32  : m_MemoryManager(new EthosnRefMemoryManager())
33 {
34 }
35 
37 {
38  return s_Id;
39 }
40 
42  Optional<DataType> dataType,
43  std::string& outReasonIfUnsupported)
44 {
45  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
46 }
47 
48 std::unique_ptr<ITensorHandle> EthosnRefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
49  const bool isMemoryManaged) const
50 {
51  // For EthosnRef it is okay to make the TensorHandle memory managed as it can also store a pointer
52  // to unmanaged memory. This also ensures memory alignment.
53  IgnoreUnused(isMemoryManaged);
54  return std::make_unique<EthosnRefTensorHandle>(tensorInfo, m_MemoryManager);
55 }
56 
57 std::unique_ptr<ITensorHandle> EthosnRefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
58  DataLayout dataLayout,
59  const bool isMemoryManaged) const
60 {
61  // For EthosnRef it is okay to make the TensorHandle memory managed as it can also store a pointer
62  // to unmanaged memory. This also ensures memory alignment.
63  IgnoreUnused(isMemoryManaged, dataLayout);
64  return std::make_unique<EthosnRefTensorHandle>(tensorInfo, m_MemoryManager);
65 }
66 
67 // REVISIT: Use TensorHandleFactory Import/Export methods to replace Input/Output layers
68 std::unique_ptr<IWorkload> EthosnRefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
69  const WorkloadInfo& info) const
70 {
71  if (info.m_InputTensorInfos.empty() )
72  {
73  throw InvalidArgumentException("EthosnRefWorkloadFactory::CreateInput: Input cannot be zero length");
74  }
75  if (info.m_OutputTensorInfos.empty())
76  {
77  throw InvalidArgumentException("EthosnRefWorkloadFactory::CreateInput: Output cannot be zero length");
78  }
79 
80  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
81  {
82  throw InvalidArgumentException("EthosnRefWorkloadFactory::CreateInput: data input and output differ in byte count.");
83  }
84 
85  return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
86 }
87 
88 std::unique_ptr<IWorkload> EthosnRefWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
89  const WorkloadInfo& info) const
90 {
91  if (info.m_InputTensorInfos.empty() )
92  {
93  throw InvalidArgumentException("EthosnRefWorkloadFactory::CreateOutput: Input cannot be zero length");
94  }
95  if (info.m_OutputTensorInfos.empty())
96  {
97  throw InvalidArgumentException("EthosnRefWorkloadFactory::CreateOutput: Output cannot be zero length");
98  }
99  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
100  {
101  throw InvalidArgumentException("EthosnRefWorkloadFactory::CreateOutput: data input and output differ in byte count.");
102  }
103 
104  return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
105 }
106 
108  const WorkloadInfo& info) const
109 {
110  return std::make_unique<EthosnRefConvolution2dWorkload>(descriptor, info, m_EnableWinograd, m_BlockConfigs);
111 }
112 
114  const DepthwiseConvolution2dQueueDescriptor& descriptor,
115  const WorkloadInfo& info) const
116 {
117  return std::make_unique<EthosnRefDepthwiseConvolution2dWorkload>(descriptor, info);
118 }
119 
121  const TransposeConvolution2dQueueDescriptor& descriptor,
122  const WorkloadInfo& info) const
123 {
124  return std::make_unique<EthosnRefTransposeConvolution2dWorkload>(descriptor, info);
125 }
126 
127 std::unique_ptr<IWorkload> EthosnRefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
128  const WorkloadInfo& info) const
129 {
130  return std::make_unique<EthosnRefPooling2dWorkload>(descriptor, info);
131 }
132 
133 } // namespace armnn
std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const override
DataLayout
Definition: Types.hpp:62
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const override
std::vector< TensorInfo > m_InputTensorInfos
std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const override
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
const BackendId & GetBackendId() const override
std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const override
std::vector< TensorInfo > m_OutputTensorInfos
std::unique_ptr< IWorkload > CreateInput(const InputQueueDescriptor &descriptor, const WorkloadInfo &info) const override
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
constexpr const char * EthosnRefBackendId()
Contains information about TensorInfos of a layer.
Depthwise Convolution 2D layer workload data.
std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const override