ArmNN 20.08
NeonConstantWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConstantWorkload.hpp"

#include <arm_compute/core/Types.h>
#include <BFloat16.hpp>
#include <Half.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <boost/cast.hpp>

#include <algorithm>

namespace armnn
{

arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
{
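    // A constant workload only copies data, so the output is supported whenever
    // ACL can represent its element type: build the ACL tensor info and check
    // its data type against the supported list.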
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    std::array<arm_compute::DataType,9> supportedTypes = {
            arm_compute::DataType::BFLOAT16,
            arm_compute::DataType::F16,
            arm_compute::DataType::F32,
            arm_compute::DataType::QASYMM8,
            arm_compute::DataType::QASYMM8_SIGNED,
            arm_compute::DataType::QSYMM16,
            arm_compute::DataType::QSYMM8,
            arm_compute::DataType::QSYMM8_PER_CHANNEL,
            arm_compute::DataType::S32
    };
    auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());

    if (it != end(supportedTypes))
    {
        return arm_compute::Status{};
    }
    else
    {
        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
    }
}
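
// Illustrative usage (a sketch, not from the original file): layer-support
// checks can forward to this validate function and treat an empty Status as
// success. The TensorInfo shape and data type below are assumptions:
//
//     armnn::TensorInfo outputInfo({1, 2, 2, 1}, armnn::DataType::Float32);
//     arm_compute::Status aclStatus = armnn::NeonConstantWorkloadValidate(outputInfo);
//     bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);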

NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
                                           const WorkloadInfo& info)
    : BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
    , m_RanOnce(false)
{
}
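
// The constructor only records the descriptor; the actual data copy is
// deferred to the first Execute() call, gated by m_RanOnce below.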

void NeonConstantWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantWorkload_Execute");

    using namespace armcomputetensorutils;

    // The intermediate tensor held by the corresponding layer output handler can be initialised with the
    // given data on the first inference, then reused for subsequent inferences.
    // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
    // may not have been configured at that time.
    if (!m_RanOnce)
    {
        const ConstantQueueDescriptor& data = this->m_Data;

        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
        arm_compute::ITensor& output =
            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
        arm_compute::DataType computeDataType =
            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();

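        // Dispatch on the ACL data type so the constant data is read from the
        // layer output handle with the matching element type.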
        switch (computeDataType)
        {
            case arm_compute::DataType::BFLOAT16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<BFloat16>(), output);
                break;
            }
            case arm_compute::DataType::F16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
                break;
            }
            case arm_compute::DataType::F32:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
                break;
            }
            case arm_compute::DataType::QASYMM8:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
                break;
            }
            case arm_compute::DataType::QASYMM8_SIGNED:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                break;
            }
            case arm_compute::DataType::QSYMM16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
                break;
            }
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                break;
            }
            case arm_compute::DataType::S32:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
                break;
            }
            default:
            {
                ARMNN_ASSERT_MSG(false, "Unknown data type");
                break;
            }
        }

        m_RanOnce = true;
    }
}

} //namespace armnn
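
// Illustrative end-to-end context (a sketch, not from the original file): a
// Constant layer added through the public INetwork API is what ultimately
// reaches this workload when the network runs on the CpuAcc backend. The
// shape, values, and variable names below are assumptions:
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     const armnn::TensorInfo info({1, 4}, armnn::DataType::Float32);
//     std::vector<float> values = {1.f, 2.f, 3.f, 4.f};
//     armnn::ConstTensor constant(info, values);
//     armnn::IConnectableLayer* constLayer = net->AddConstantLayer(constant);
//     constLayer->GetOutputSlot(0).SetTensorInfo(info);
//     // On the first inference, NeonConstantWorkload::Execute() copies
//     // `values` into the backing ACL tensor; later inferences reuse it.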