ArmNN 24.02
NeonConstantWorkload.cpp
//
// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConstantWorkload.hpp"

#include <arm_compute/core/Types.h>
#include <BFloat16.hpp>
#include <Half.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include "NeonBaseWorkload.hpp"

namespace armnn
{
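
// Checks whether the Neon backend can service a Constant layer whose output has
// the given TensorInfo: its corresponding ACL data type must appear in the
// supported set below.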
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
{
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    std::array<arm_compute::DataType,9> supportedTypes = {
            arm_compute::DataType::BFLOAT16,
            arm_compute::DataType::F16,
            arm_compute::DataType::F32,
            arm_compute::DataType::QASYMM8,
            arm_compute::DataType::QASYMM8_SIGNED,
            arm_compute::DataType::QSYMM16,
            arm_compute::DataType::QSYMM8,
            arm_compute::DataType::QSYMM8_PER_CHANNEL,
            arm_compute::DataType::S32
    };
    auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());

    if (it != end(supportedTypes))
    {
        return arm_compute::Status{};
    }
    else
    {
        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
    }
}
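
// The constructor only records its inputs; the constant data is copied into the
// output tensor lazily, on the first call to Execute() (hence m_RanOnce).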
NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
                                           const WorkloadInfo& info)
    : NeonBaseWorkload<ConstantQueueDescriptor>(descriptor, info)
    , m_RanOnce(false)
{
}

void NeonConstantWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConstantWorkload_Execute");

    using namespace armcomputetensorutils;

    // The intermediate tensor held by the corresponding layer output handler can be initialised with the
    // given data on the first inference, then reused for subsequent inferences.
    // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
    // may not have been configured at the time.
    if (!m_RanOnce)
    {
        const ConstantQueueDescriptor& data = this->m_Data;

        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
        arm_compute::ITensor& output =
            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
        arm_compute::DataType computeDataType =
            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
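
        // Dispatch on the ACL data type so that the copy below reads the
        // constant's storage with the matching C++ element type.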
        switch (computeDataType)
        {
            case arm_compute::DataType::BFLOAT16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<BFloat16>(), output);
                break;
            }
            case arm_compute::DataType::F16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
                break;
            }
            case arm_compute::DataType::F32:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
                break;
            }
            case arm_compute::DataType::QASYMM8:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
                break;
            }
            case arm_compute::DataType::QASYMM8_SIGNED:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                break;
            }
            case arm_compute::DataType::QSYMM16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
                break;
            }
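            // Per-tensor and per-channel symmetric QSYMM8 differ only in their
            // quantisation metadata; both store elements as int8_t.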
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                break;
            }
            case arm_compute::DataType::S32:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
                break;
            }
            default:
            {
                ARMNN_ASSERT_MSG(false, "Unknown data type");
                break;
            }
        }

        m_RanOnce = true;
    }
}

} //namespace armnn
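
A minimal usage sketch, assuming the caller has already built a ConstantQueueDescriptor ('descriptor') and WorkloadInfo ('info') as ArmNN's backend factories normally do; 'outputInfo' and the error handling are illustrative only:

    // Validate that the Neon backend supports the constant's output type before
    // constructing the workload.
    armnn::TensorInfo outputInfo({1, 4}, armnn::DataType::Float32);

    arm_compute::Status status = armnn::NeonConstantWorkloadValidate(outputInfo);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        armnn::NeonConstantWorkload workload(descriptor, info);
        workload.Execute(); // first call copies the constant data into the output tensor
        workload.Execute(); // later calls skip the copy because m_RanOnce is set
    }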