// NOTE(review): extraction fragment — the enclosing function (presumably the
// DataType::QSymmS16 branch of a DepthwiseConvolution2d workload Execute()) begins
// before this view; original lines 34-35, 38-39, 42 and 45 (likely outputData /
// biasData declarations and the remaining call arguments) are missing here.
// Branch purpose: run the reference depthwise convolution with int16 input and
// weights, int32 bias, accumulating in int64 to avoid overflow — TODO confirm
// against the full source.
33 const TensorInfo& weightsInfo = m_Weight->GetTensorInfo();
// NOTE(review): stray second ';' below (harmless empty statement) — from a
// line-wrapped statement in the extraction; should be a single ';' in the source.
36 const int16_t* inputData = GetInputTensorData<int16_t>(0,
m_Data);;
37 const int16_t* weightsData = m_Weight->template GetConstTensor<int16_t>();
// NOTE(review): filterInfo duplicates weightsInfo above — both call
// m_Weight->GetTensorInfo(); one const ref would suffice.
40 const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
// Template args: <QueueDescriptor, InputT, WeightT, BiasT, AccumulatorT>.
// Per-tensor quantization scale/offset are forwarded for input, weights and
// output; the trailing 'true' presumably selects depthwise mode — TODO confirm.
41 EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, int16_t, int16_t, int32_t, int64_t>(
43 inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
44 weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
46 outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo,
true);
// NOTE(review): extraction fragment — int8 (presumably DataType::QAsymmS8 /
// QSymmS8) branch of the same Execute(); original lines 55 and 58 are missing
// from this view and most likely carried outputData and the biasData/biasInfo
// call arguments — TODO confirm against the full source.
// NOTE(review): stray second ';' below (harmless empty statement) — extraction
// artifact of a wrapped statement.
49 const int8_t* inputData = GetInputTensorData<int8_t>(0,
m_Data);;
50 const int8_t* weightsData = m_Weight->template GetConstTensor<int8_t>();
// Bias is optional: only dereferenced when m_BiasEnabled, otherwise nullptr.
// biasData is declared here but not visible in the call below — presumably
// passed on the missing original line 58.
51 const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() :
nullptr;
52 const TensorInfo& outputInfo =
GetTensorInfo(m_Data.m_Outputs[0]);
53 const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
// Template args: <QueueDescriptor, InputT, WeightT, BiasT, AccumulatorT>;
// int8 data accumulates in int64, mirroring the int16 branch above.
54 EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, int8_t, int8_t, int32_t, int64_t>(
56 inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
57 weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
59 outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo,
true);
// NOTE(review): extraction fragment — uint8 (presumably DataType::QAsymmU8)
// branch of the same Execute(); original lines 70 and 73 are missing from this
// view (likely outputData and the biasData/biasInfo call arguments) — TODO
// confirm against the full source.
// NOTE(review): stray second ';' below (harmless empty statement) — extraction
// artifact of a wrapped statement.
64 const uint8_t* inputData = GetInputTensorData<uint8_t>(0,
m_Data);;
65 const uint8_t* weightsData = m_Weight->template GetConstTensor<uint8_t>();
// Optional bias, same pattern as the int8 branch: nullptr when disabled.
66 const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() :
nullptr;
67 const TensorInfo& outputInfo =
GetTensorInfo(m_Data.m_Outputs[0]);
68 const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
// Template args: <QueueDescriptor, InputT, WeightT, BiasT, AccumulatorT>.
// NOTE(review): unlike the int16/int8 branches (int64 accumulator), this uint8
// branch accumulates in int32 — verify this narrower accumulator cannot
// overflow for the supported filter sizes.
69 EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, uint8_t, uint8_t, int32_t, int32_t>(
71 inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
72 weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
74 outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo,
true);
bool m_BiasEnabled
Enables or disables the bias addition.
LayerDescriptor m_Parameters
bool CheckDataType(DataType type, DataType inputType, DataType weightsType)
DepthwiseConvolution2dQueueDescriptor m_Data
#define ARMNN_SCOPED_PROFILING_EVENT_ETHOSN(name)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers