39 std::vector<std::unique_ptr<Decoder<float>>> inputDecoders;
40 for (
unsigned int i=0; i<inputs.size(); ++i)
42 inputDecoders.push_back(MakeDecoder<float>(
GetTensorInfo(inputs[i]),
45 std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(
GetTensorInfo(outputs[0]),
48 Stack(
m_Data, inputDecoders, *outputEncoder, inputInfo, outputInfo);
CPU Execution: Reference C++ kernels.
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
std::vector< ITensorHandle * > m_Inputs
RefStackWorkload(const StackQueueDescriptor &descriptor, const WorkloadInfo &info)
StackQueueDescriptor m_Data
void ExecuteAsync(ExecutionData &executionData) override
std::vector< ITensorHandle * > m_Outputs
void Execute() const override
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers