#include <arm_compute/core/Types.h>
#include <boost/cast.hpp>

// Checks that the output tensor's data type is one the Neon constant workload can handle.
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
{
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    std::array<arm_compute::DataType,9> supportedTypes = {
        arm_compute::DataType::BFLOAT16,
        arm_compute::DataType::F16,
        arm_compute::DataType::F32,
        arm_compute::DataType::QASYMM8,
        arm_compute::DataType::QASYMM8_SIGNED,
        arm_compute::DataType::QSYMM16,
        arm_compute::DataType::QSYMM8,
        arm_compute::DataType::QSYMM8_PER_CHANNEL,
        arm_compute::DataType::S32
    };
    auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());

    if (it != end(supportedTypes))
    {
        return arm_compute::Status{};
    }
    return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
}
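// Usage sketch, not part of the original file: a caller (for example a
// layer-support check) can reduce the returned arm_compute::Status to a
// bool. The helper name IsConstantSupportedOnNeonSketch is hypothetical.
inline bool IsConstantSupportedOnNeonSketch(const TensorInfo& outputInfo)
{
    const arm_compute::Status aclStatus = NeonConstantWorkloadValidate(outputInfo);
    return aclStatus.error_code() == arm_compute::ErrorCode::OK;
}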
void NeonConstantWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantWorkload_Execute");

    using namespace armcomputetensorutils;

    const ConstantQueueDescriptor& data = this->m_Data;
    ARMNN_ASSERT(data.m_LayerOutput != nullptr);

    // Reach through the generic output handle to the backing arm_compute tensor.
    arm_compute::ITensor& output =
        PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
    arm_compute::DataType computeDataType =
        PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
    // Copy the layer's constant data into the Neon tensor, using the element
    // type that matches the tensor's data type.
    switch (computeDataType)
    {
        case arm_compute::DataType::BFLOAT16:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<BFloat16>(), output);
            break;
        case arm_compute::DataType::F16:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
            break;
        case arm_compute::DataType::F32:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
            break;
        case arm_compute::DataType::QASYMM8:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
            break;
        case arm_compute::DataType::QASYMM8_SIGNED:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
            break;
        case arm_compute::DataType::QSYMM16:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
            break;
        case arm_compute::DataType::QSYMM8:
        case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
            break;
        case arm_compute::DataType::S32:
            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
            break;
        default:
            ARMNN_ASSERT_MSG(false, "Unknown data type");
            break;
    }
}
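// Illustrative sketch, not the library implementation: CopyArmComputeITensorData
// above performs this kind of element-wise copy from a flat host buffer into an
// arm_compute::ITensor, honouring any padding in the tensor's strides. The helper
// name CopyFlatBufferToITensorSketch is hypothetical; the arm_compute
// Window/Iterator API used here is real. Assumes <arm_compute/core/Helpers.h>
// and <arm_compute/core/Window.h> are included.
template <typename T>
void CopyFlatBufferToITensorSketch(const T* srcData, arm_compute::ITensor& dstTensor)
{
    // Iterate over the whole tensor shape; the Iterator advances by the
    // tensor's (possibly padded) strides while `index` walks the flat source.
    arm_compute::Window window;
    window.use_tensor_dimensions(dstTensor.info()->tensor_shape());

    size_t index = 0;
    arm_compute::Iterator it(&dstTensor, window);
    arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates&)
    {
        *reinterpret_cast<T*>(it.ptr()) = srcData[index++];
    }, it);
}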
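// End-to-end usage sketch (illustrative; handle setup is elided and the
// variable names are assumptions): the descriptor's m_LayerOutput supplies
// the constant data and m_Outputs[0] is the Neon tensor handle to fill.
//
//     ConstantQueueDescriptor descriptor;
//     descriptor.m_LayerOutput = &constantTensorHandle;  // constant data source
//     descriptor.m_Outputs.push_back(&neonOutputHandle); // NeonTensorHandle*
//     NeonConstantWorkload workload(descriptor, workloadInfo);
//     workload.Execute(); // copies the constant data into the Neon tensor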