ArmNN 22.11
RefTransposeConvolution2dWorkload.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefTransposeConvolution2dWorkload.hpp"

#include "RefWorkloadUtils.hpp"
#include "TransposeConvolution2d.hpp"

#include <Profiling.hpp>

namespace armnn
{

RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(
    const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) :
    RefBaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
{
    // set up weights decoder
    m_Weights = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
    const TensorInfo& weightsInfo = m_Weights->GetTensorInfo();

    m_WeightsDecoder = MakeDecoder<float>(weightsInfo, m_Weights->Map(true));
    m_WeightsShape   = weightsInfo.GetShape();

    // set up biases decoder
    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        m_Biases = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
        const TensorInfo& biasesInfo = m_Biases->GetTensorInfo();
        m_BiasesDecoder = MakeDecoder<float>(biasesInfo, m_Biases->Map(true));
    }
}

void RefTransposeConvolution2dWorkload::Execute() const
{
    // Synchronous execution uses the tensor handles stored in the queue descriptor.
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefTransposeConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
{
    // Asynchronous execution takes its tensor handles from the per-call working memory descriptor.
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefTransposeConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                                std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTransposeConvolution2dWorkload_Execute");

    const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    // Decoders/encoders convert between each tensor's actual data type and float
    // so the reference kernel can work on a single representation.
    std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(inputInfo, inputs[0]->Map());
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

    TransposeConvolution2dImpl(m_Data.m_Parameters,
                               inputInfo.GetShape(),
                               *inputDecoder,
                               outputInfo.GetShape(),
                               *outputEncoder,
                               m_WeightsShape,
                               *m_WeightsDecoder,
                               m_BiasesDecoder.get());
}

} // namespace armnn
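
For context, the arithmetic above is delegated to TransposeConvolution2dImpl, which scatters each input element onto a strided window of the output. The sketch below shows that computation on plain float buffers in NHWC layout (single batch, no dilation); the function name, the [outC][kH][kW][inC] weight layout, and the parameters are illustrative assumptions, not ArmNN's actual implementation, which additionally routes every element through the Decoder/Encoder objects so the same loops handle quantized tensors.

// Minimal sketch of a scatter-style 2D transpose convolution, assuming NHWC
// layout, a single batch and no dilation. Not ArmNN's TransposeConvolution2dImpl;
// names and the [outC][kH][kW][inC] weight layout are illustrative choices.
#include <cstddef>
#include <vector>

struct Shape2d { unsigned h, w, c; };

void TransposeConv2dSketch(const std::vector<float>& input, Shape2d inShape,
                           const std::vector<float>& weights, unsigned kH, unsigned kW,
                           const std::vector<float>* bias, // one value per output channel, or nullptr
                           std::vector<float>& output, Shape2d outShape,
                           unsigned strideY, unsigned strideX,
                           unsigned padTop, unsigned padLeft)
{
    output.assign(static_cast<std::size_t>(outShape.h) * outShape.w * outShape.c, 0.0f);

    for (unsigned iy = 0; iy < inShape.h; ++iy)
    {
        for (unsigned ix = 0; ix < inShape.w; ++ix)
        {
            // Each input element is scattered onto a kH x kW window of the output,
            // spaced out by the strides and cropped by the padding.
            for (unsigned ky = 0; ky < kH; ++ky)
            {
                for (unsigned kx = 0; kx < kW; ++kx)
                {
                    const int oy = static_cast<int>(iy * strideY + ky) - static_cast<int>(padTop);
                    const int ox = static_cast<int>(ix * strideX + kx) - static_cast<int>(padLeft);
                    if (oy < 0 || ox < 0 ||
                        oy >= static_cast<int>(outShape.h) || ox >= static_cast<int>(outShape.w))
                    {
                        continue; // this contribution falls into the cropped border
                    }

                    for (unsigned oc = 0; oc < outShape.c; ++oc)
                    {
                        float acc = 0.0f;
                        for (unsigned ic = 0; ic < inShape.c; ++ic)
                        {
                            const float in = input[(iy * inShape.w + ix) * inShape.c + ic];
                            const float w  = weights[((oc * kH + ky) * kW + kx) * inShape.c + ic];
                            acc += in * w;
                        }
                        output[(static_cast<unsigned>(oy) * outShape.w +
                                static_cast<unsigned>(ox)) * outShape.c + oc] += acc;
                    }
                }
            }
        }
    }

    if (bias != nullptr)
    {
        // Bias is added once per output element, after all scattered contributions
        // have been accumulated, mirroring the optional m_BiasesDecoder handling above.
        for (unsigned oy = 0; oy < outShape.h; ++oy)
        {
            for (unsigned ox = 0; ox < outShape.w; ++ox)
            {
                for (unsigned oc = 0; oc < outShape.c; ++oc)
                {
                    output[(oy * outShape.w + ox) * outShape.c + oc] += (*bias)[oc];
                }
            }
        }
    }
}

For a consistent call, outShape.h is expected to equal (inShape.h - 1) * strideY + kH - padTop - padBottom, and likewise for the width, which is the usual output-shape relationship for transpose convolution.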