ArmNN 22.08
EthosnRefTransposeConvolutionImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Tensor.hpp>

#include <cmath>
#include <limits>
#include "EthosnRefConvImpl.hpp"

namespace armnn
{

template<typename TransConvData, typename InputType, typename FilterType, typename BiasType, typename AccumulatorType>
static void EthosnRefTransposeConvolutionImpl(TransConvData data,
                                              const InputType* inputData,
                                              float inputScale,
                                              int32_t inputOffset,
                                              const FilterType* filterData,
                                              float filterScale,
                                              int32_t filterOffset,
                                              const BiasType* biasData,
                                              float outputScale,
                                              int32_t outputOffset,
                                              const TensorInfo& filterInfo)
{
    if (data.m_Parameters.m_BiasEnabled && !biasData)
    {
        throw InvalidArgumentException("Biases enabled but no bias data provided");
    }

    const TensorInfo& inputInfo  = armnn::ethosnref::GetTensorInfo(data.m_Inputs[0]);
    const TensorInfo& outputInfo = armnn::ethosnref::GetTensorInfo(data.m_Outputs[0]);

    TensorBufferArrayView<InputType> output(outputInfo.GetShape(),
                                            armnn::ethosnref::GetOutputTensorData<InputType>(0, data),
                                            data.m_Parameters.m_DataLayout);

    const armnnUtils::DataLayoutIndexed dataLayoutIndexed(data.m_Parameters.m_DataLayout);

    const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    const unsigned int heightIndex   = dataLayoutIndexed.GetHeightIndex();
    const unsigned int widthIndex    = dataLayoutIndexed.GetWidthIndex();

    unsigned int numBatches = inputInfo.GetShape()[0];

    unsigned int inputWidth  = inputInfo.GetShape()[widthIndex];
    unsigned int inputHeight = inputInfo.GetShape()[heightIndex];
    unsigned int inputDepth  = inputInfo.GetShape()[channelsIndex];

    unsigned int filterHeight = filterInfo.GetShape()[heightIndex];
    unsigned int filterWidth  = filterInfo.GetShape()[widthIndex];

    unsigned int outputHeight = outputInfo.GetShape()[heightIndex];
    unsigned int outputWidth  = outputInfo.GetShape()[widthIndex];
    unsigned int outputDepth  = outputInfo.GetShape()[channelsIndex];

    unsigned int paddingLeft = data.m_Parameters.m_PadLeft;
    unsigned int paddingTop  = data.m_Parameters.m_PadTop;

    unsigned int strideX = data.m_Parameters.m_StrideX;
    unsigned int strideY = data.m_Parameters.m_StrideY;

    std::vector<AccumulatorType> outputBuffer(outputInfo.GetShape().GetNumElements(), 0);
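
    // For reference (standard relation, not computed in this file): a transpose convolution
    // normally relates the output spatial size to the input as
    //     outputSize = (inputSize - 1) * stride + filterSize - padBefore - padAfter.
    // Here the output shape is simply read from outputInfo, and the bounds check in the
    // scatter loop below discards any contribution that falls outside it.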

    // Scatter pass: each input element, corrected by its zero-point offset, is multiplied by
    // every filter weight and accumulated into the output positions the filter window covers.
    for (unsigned int batch = 0u; batch < numBatches; ++batch)
    {
        for (unsigned int yInput = 0u; yInput < inputHeight; ++yInput)
        {
            for (unsigned int xInput = 0u; xInput < inputWidth; ++xInput)
            {
                // Top-left corner of the output region written by this input element. The
                // unsigned subtraction may wrap when the padding exceeds the scaled coordinate;
                // the bounds check below rejects those positions.
                unsigned int xOutputOrigin = xInput * strideX - paddingLeft;
                unsigned int yOutputOrigin = yInput * strideY - paddingTop;

                for (unsigned int dOutput = 0u; dOutput < outputDepth; ++dOutput)
                {
                    for (unsigned int yFilters = 0u; yFilters < filterHeight; ++yFilters)
                    {
                        for (unsigned int xFilters = 0u; xFilters < filterWidth; ++xFilters)
                        {
                            unsigned int yOutput = yOutputOrigin + yFilters;
                            unsigned int xOutput = xOutputOrigin + xFilters;

                            if (yOutput < outputHeight && xOutput < outputWidth)
                            {
                                for (unsigned int dInput = 0u; dInput < inputDepth; dInput++)
                                {
                                    const unsigned int inputIndex =
                                        dataLayoutIndexed.GetIndex(inputInfo.GetShape(), batch, dInput, yInput, xInput);

                                    const unsigned int filterIndex =
                                        dataLayoutIndexed.GetIndex(filterInfo.GetShape(), dOutput, dInput, yFilters, xFilters);

                                    // The accumulation buffer is always indexed in NCHW order,
                                    // regardless of the workload data layout.
                                    const unsigned int outputIndex = batch * outputDepth * outputHeight * outputWidth +
                                                                     dOutput * outputHeight * outputWidth +
                                                                     yOutput * outputWidth +
                                                                     xOutput;

                                    outputBuffer[outputIndex] +=
                                        (inputData[inputIndex] - numeric_cast<AccumulatorType>(inputOffset)) *
                                        (filterData[filterIndex] - numeric_cast<AccumulatorType>(filterOffset));
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // Apply the bias (if enabled), requantize to the output scale and clamp to the
    // representable range of the output element type.
    for (unsigned int batch = 0u; batch < numBatches; ++batch)
    {
        for (unsigned int dOutput = 0u; dOutput < outputDepth; ++dOutput)
        {
            for (unsigned int yOutput = 0u; yOutput < outputHeight; ++yOutput)
            {
                for (unsigned int xOutput = 0u; xOutput < outputWidth; ++xOutput)
                {
                    const unsigned int biaspass_outputIndex = batch * outputDepth * outputHeight * outputWidth +
                                                              dOutput * outputHeight * outputWidth +
                                                              yOutput * outputWidth +
                                                              xOutput;

                    if (data.m_Parameters.m_BiasEnabled)
                    {
                        outputBuffer[biaspass_outputIndex] += biasData[dOutput];
                    }

                    if (outputScale != 0.0f)
                    {
                        // Requantize: the accumulator is at scale inputScale * filterScale, so
                        // rescale by (inputScale * filterScale) / outputScale and re-apply the
                        // output zero-point offset.
                        float multiplier = (inputScale * filterScale) / outputScale;
                        outputBuffer[biaspass_outputIndex] =
                            numeric_cast<AccumulatorType>(EthosnRefQuantizedMultiplierSmallerThanOne(multiplier) *
                                                          outputBuffer[biaspass_outputIndex]) +
                            numeric_cast<AccumulatorType>(outputOffset);
                        outputBuffer[biaspass_outputIndex] =
                            std::min<AccumulatorType>(std::max<AccumulatorType>(outputBuffer[biaspass_outputIndex],
                                                                                std::numeric_limits<InputType>::min()),
                                                      std::numeric_limits<InputType>::max());
                    }

                    output.Get(batch, dOutput, yOutput, xOutput) =
                        numeric_cast<InputType>(outputBuffer[biaspass_outputIndex]);
                }
            }
        }
    }
}

} // namespace armnn
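
For context, the sketch below shows one way the template might be instantiated for an 8-bit quantized transpose convolution. It is illustrative only: the wrapper function, the GetInputTensorData helper (assumed by analogy with GetOutputTensorData used above) and the weight/bias pointers are assumptions, not part of this file.

// Illustrative sketch, not part of EthosnRefTransposeConvolutionImpl.hpp.
// Assumes a queue-descriptor-like wrapper exposing m_Inputs, m_Outputs and m_Parameters,
// and an ethosnref::GetInputTensorData helper analogous to GetOutputTensorData above.
template<typename TransConvData>
void RunUint8TransposeConvolution(const TransConvData& data,
                                  const uint8_t* weights,
                                  const int32_t* biases,
                                  const armnn::TensorInfo& filterInfo)
{
    using namespace armnn;

    const TensorInfo& inputInfo  = ethosnref::GetTensorInfo(data.m_Inputs[0]);
    const TensorInfo& outputInfo = ethosnref::GetTensorInfo(data.m_Outputs[0]);

    // uint8 input and weights, int32 bias and accumulator.
    EthosnRefTransposeConvolutionImpl<TransConvData, uint8_t, uint8_t, int32_t, int32_t>(
        data,
        ethosnref::GetInputTensorData<uint8_t>(0, data),    // assumed helper
        inputInfo.GetQuantizationScale(),
        inputInfo.GetQuantizationOffset(),
        weights,
        filterInfo.GetQuantizationScale(),
        filterInfo.GetQuantizationOffset(),
        biases,
        outputInfo.GetQuantizationScale(),
        outputInfo.GetQuantizationOffset(),
        filterInfo);
}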