28     float* output = GetOutputTensorData<float>(0, m_Data);
29     BOOST_ASSERT(output != nullptr);
34     for (unsigned int inputIdx=0; inputIdx<numInputs; ++inputIdx)
36         const float* input = GetInputTensorData<float>(inputIdx, m_Data);
37         for (unsigned int elmt=0; elmt<inputLength; ++elmt)
39             output[(inputIdx * inputLength) + elmt] = input[elmt];
45     std::vector<std::unique_ptr<Decoder<float>>> inputDecoders;
uint32_t m_Axis
0-based axis along which to stack the input tensors.
CPU Execution: Reference C++ kernels.
const StackQueueDescriptor m_Data
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Copyright (c) 2020 ARM Limited.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
RefStackWorkload(const StackQueueDescriptor &descriptor, const WorkloadInfo &info)
uint32_t m_NumInputs
Number of input tensors.
std::vector< ITensorHandle * > m_Outputs
virtual void Execute() const override
Contains information about inputs and outputs to a layer.
std::vector< ITensorHandle * > m_Inputs
unsigned int GetNumElements() const