17 template<armnn::DataType DataType>
20 Execute(m_Data.m_Inputs);
23 template<armnn::DataType DataType>
27 Execute(workingMemDescriptor->
m_Inputs);
30 template<armnn::DataType DataType>
39 const T* inputData = GetInputTensorData<T>(0, m_Data);
40 T* outputData = GetOutputTensorData<T>(0, m_Data);
44 m_Callback(m_Data.m_Guid, m_Data.m_SlotIndex, inputs[0]);
48 Debug(inputInfo, inputData, m_Data.m_Guid, m_Data.m_LayerName, m_Data.m_SlotIndex, m_Data.m_LayerOutputToFile);
51 std::memcpy(outputData, inputData, inputInfo.
GetNumElements()*
sizeof(T));
54 template<armnn::DataType DataType>
void Execute() const override
CPU Execution: Reference C++ kernels.
typename ResolveTypeImpl< DT >::Type ResolveType
Copyright (c) 2021 ARM Limited and Contributors.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Defines the type of the callback that the Debug layer invokes for each intermediate tensor.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
std::vector< ITensorHandle * > m_Inputs
void Debug(const TensorInfo &inputInfo, const T *inputData, LayerGuid guid, const std::string &layerName, unsigned int slotIndex, bool outputsToFile)
void ExecuteAsync(ExecutionData &executionData) override
void RegisterDebugCallback(const DebugCallbackFunction &func) override
unsigned int GetNumElements() const
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
Helper functions for float32 tensor data.