ArmNN 22.05
TransposeConvolution2dEndToEndTestImpl.hpp File Reference
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <CommonTestUtils.hpp>
#include <map>
#include <vector>


Functions

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void TransposeConvolution2dEndToEnd (const std::vector< armnn::BackendId > &backends, armnn::DataLayout dataLayout)
 

Function Documentation

◆ TransposeConvolution2dEndToEnd()

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void TransposeConvolution2dEndToEnd (const std::vector< armnn::BackendId > & backends,
                                     armnn::DataLayout dataLayout)

Definition at line 43 of file TransposeConvolution2dEndToEndTestImpl.hpp.

References TensorInfo::GetShape(), TransposeConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideY, MakeTensorShape(), armnn::NHWC, and armnn::Permute.

{
    using namespace armnn;
    using T = ResolveType<ArmnnType>;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = MakeTensorShape(batches, channels, hInput, wInput, dataLayout);
    TensorShape outputShape  = MakeTensorShape(batches, channels, hOutput, wOutput, dataLayout);
    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, dataLayout);

    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
    const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
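    // For quantized types these parameters describe an asymmetric scheme,
    // real = qScale * (quantized - qOffset); float runs use the identity
    // mapping (scale 1.0, offset 0).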

    TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
    TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
    TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true);
    TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
         6.f, 11.f,  6.f, 11.f,  6.f,
        11.f, 21.f, 11.f, 21.f, 11.f,
         6.f, 11.f,  6.f, 11.f,  6.f,
        11.f, 21.f, 11.f, 21.f, 11.f,
         6.f, 11.f,  6.f, 11.f,  6.f
    };
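    // With an all-ones input, each expected value is the sum of the kernel
    // weights overlapping that output position plus the bias: 6 = 5 (centre
    // weight) + 1, 11 = 4 + 6 + 1, and 21 = 1 + 3 + 7 + 9 + 1.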

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 1;
    descriptor.m_PadRight    = 1;
    descriptor.m_PadTop      = 1;
    descriptor.m_PadBottom   = 1;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = dataLayout;
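    // With these settings each spatial output dimension of the transpose
    // convolution is stride * (inputSize - 1) + kernelSize - padBegin - padEnd
    // = 2 * (3 - 1) + 3 - 1 - 1 = 5, matching wOutput/hOutput above.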

    // swizzle data if needed
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        constexpr size_t dataTypeSize = sizeof(float);
        const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
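        // An ArmNN PermutationVector maps each source dimension to its
        // destination: { 0, 3, 1, 2 } sends N->0, C->3, H->1, W->2,
        // rearranging NCHW data into NHWC order.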

        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
        inputData = tmp;

        tmp.resize(weightsData.size());
        armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
        weightsData = tmp;

        tmp.resize(expectedOutputData.size());
        armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
        expectedOutputData = tmp;
    }

    // quantize data
    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);

    using BT = ResolveType<ArmnnBType>;
    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
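    // The bias scale of qScale * qScale matches the input-scale * weight-scale
    // convention used when biasesInfo was constructed above.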

    ConstTensor weights(weightsInfo, qWeightsData);
    ConstTensor biases(biasesInfo, qBiasesData);

    INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
                                                              inputInfo,
                                                              outputInfo,
                                                              weights,
                                                              Optional<ConstTensor>(biases));

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                { { 0, qInputData } },
                                                { { 0, qExpectedOutputData } },
                                                backends);
}
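
The template is typically instantiated from a backend's end-to-end test suite. A minimal sketch of such an invocation, assuming the reference backend (CpuRef) is registered; the wrapper function name is illustrative only:

    #include "TransposeConvolution2dEndToEndTestImpl.hpp"

    void RunTransposeConvolution2dEndToEndExamples()
    {
        const std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

        // Float32 data and bias, NCHW layout.
        TransposeConvolution2dEndToEnd<armnn::DataType::Float32,
                                       armnn::DataType::Float32>(backends, armnn::DataLayout::NCHW);

        // Asymmetric 8-bit quantized data with Signed32 bias, NHWC layout.
        TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8,
                                       armnn::DataType::Signed32>(backends, armnn::DataLayout::NHWC);
    }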