38 std::vector<std::unique_ptr<Decoder<float>>> inputDecoders;
39 for (
unsigned int i=0; i<inputs.size(); ++i)
41 inputDecoders.push_back(MakeDecoder<float>(
GetTensorInfo(inputs[i]),
44 std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(
GetTensorInfo(outputs[0]),
47 Stack(
m_Data, inputDecoders, *outputEncoder, inputInfo, outputInfo);
CPU Execution: Reference C++ kernels.
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
std::vector< ITensorHandle * > m_Inputs
RefStackWorkload(const StackQueueDescriptor &descriptor, const WorkloadInfo &info)
StackQueueDescriptor m_Data
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Outputs
void Execute() const override
Contains information about the TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
Float32 helpers.