21 m_Weights = std::make_unique<ScopedTensorHandle>(*(descriptor.
m_Weight));
22 const TensorInfo& weightsInfo = m_Weights->GetTensorInfo();
24 m_WeightsDecoder = MakeDecoder<float>(weightsInfo, m_Weights->Map(
true));
25 m_WeightsShape = weightsInfo.GetShape();
30 m_Biases = std::make_unique<ScopedTensorHandle>(*(descriptor.
m_Bias));
31 const TensorInfo& biasesInfo = m_Biases->GetTensorInfo();
32 m_BiasesDecoder = MakeDecoder<float>(biasesInfo, m_Biases->Map(
true));
47 std::vector<ITensorHandle*> outputs)
const 54 std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
55 std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
60 outputInfo.GetShape(),
64 m_BiasesDecoder.get());
CPU Execution: Reference C++ kernels.
void Execute() const override
bool m_BiasEnabled
Enables or disables the bias.
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
void TransposeConvolution2dImpl(const TransposeConvolution2dDescriptor &descriptor, const TensorShape &inputShape, Decoder< float > &inputDecoder, const TensorShape &outputShape, Encoder< float > &outputEncoder, const TensorShape &weightsShape, Decoder< float > &weightsDecoder, Decoder< float > *biasesDecoder)
Copyright (c) 2021 ARM Limited and Contributors.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
std::vector< ITensorHandle * > m_Inputs
TransposeConvolution2dQueueDescriptor m_Data
RefTransposeConvolution2dWorkload(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Outputs
Contains information about the TensorInfos of a layer.
const ConstTensorHandle * m_Weight
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
const ConstTensorHandle * m_Bias