ArmNN 21.11
TransposeConvolution2dEndToEndTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "QuantizeHelper.hpp"

#include <armnnUtils/Permute.hpp>

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <CommonTestUtils.hpp>

#include <map>
#include <vector>

namespace
{

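// Builds a single-layer network: input -> TransposeConvolution2d -> output.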
armnn::INetworkPtr CreateTransposeConvolution2dNetwork(const armnn::TransposeConvolution2dDescriptor& descriptor,
                                                        const armnn::TensorInfo& inputInfo,
                                                        const armnn::TensorInfo& outputInfo,
                                                        const armnn::ConstTensor& weights,
                                                        const armnn::Optional<armnn::ConstTensor>& biases)
{
    using namespace armnn;

    INetworkPtr network(INetwork::Create());
    IConnectableLayer* input = network->AddInputLayer(0, "input");
    IConnectableLayer* transposeConvolution2d =
        network->AddTransposeConvolution2dLayer(descriptor, weights, biases, "transposeConvolution2d");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    Connect(input, transposeConvolution2d, inputInfo, 0, 0);
    Connect(transposeConvolution2d, output, outputInfo, 0, 0);

    return network;
}

} // anonymous namespace

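// End-to-end test for a single TransposeConvolution2d layer: builds the network above,
// runs inference on the given backends and compares the result with reference values.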
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
                                    armnn::DataLayout dataLayout)
{
    using namespace armnn;
    using T = ResolveType<ArmnnType>;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;
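    // With a 3x3 kernel, stride 2 and padding 1 on each side, the transposed convolution
    // output size is (3 - 1) * 2 + 3 - 1 - 1 = 5 in each spatial dimension.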

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = MakeTensorShape(batches, channels, hInput, wInput, dataLayout);
    TensorShape outputShape  = MakeTensorShape(batches, channels, hOutput, wOutput, dataLayout);
    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, dataLayout);

    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
    const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;

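    // Per-tensor quantization parameters; the bias scale is the product of the input and
    // weight scales (here qScale * qScale), as ArmNN expects for quantized convolutions.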
    TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
    TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
    TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true);
    TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

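    // Reference output worked out by hand for the all-ones input: the kernel centre alone
    // lands where both output coordinates are even (5 + 1 = 6), two taps overlap where
    // exactly one coordinate is odd (10 + 1 = 11), and the four corner taps overlap where
    // both are odd (1 + 3 + 7 + 9 + 1 = 21); the trailing + 1 is the bias.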
    std::vector<float> expectedOutputData =
    {
         6.f, 11.f,  6.f, 11.f,  6.f,
        11.f, 21.f, 11.f, 21.f, 11.f,
         6.f, 11.f,  6.f, 11.f,  6.f,
        11.f, 21.f, 11.f, 21.f, 11.f,
         6.f, 11.f,  6.f, 11.f,  6.f
    };

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 1;
    descriptor.m_PadRight    = 1;
    descriptor.m_PadTop      = 1;
    descriptor.m_PadBottom   = 1;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = dataLayout;

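    // The literal input, weight and expected output values above are written in NCHW order;
    // for NHWC runs they are rearranged with an NCHW -> NHWC permutation below.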
    // swizzle data if needed
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        constexpr size_t dataTypeSize = sizeof(float);
        const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };

        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
        inputData = tmp;

        tmp.resize(weightsData.size());
        armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
        weightsData = tmp;

        tmp.resize(expectedOutputData.size());
        armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
        expectedOutputData = tmp;
    }

    // quantize data
    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);

    using BT = ResolveType<ArmnnBType>;
    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);

    ConstTensor weights(weightsInfo, qWeightsData);
    ConstTensor biases(biasesInfo, qBiasesData);

    INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
                                                              inputInfo,
                                                              outputInfo,
                                                              weights,
                                                              Optional<ConstTensor>(biases));

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                { { 0, qInputData } },
                                                { { 0, qExpectedOutputData } },
                                                backends);
}
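A call site for this helper normally lives in a backend's end-to-end test suite. The sketch below is illustrative only: the test name and the choice of the CpuRef backend are assumptions, not something this header defines.

// Illustrative sketch (assumed test name and backend, not part of this file):
#include <doctest/doctest.h>

#include "TransposeConvolution2dEndToEndTestImpl.hpp"

TEST_CASE("TransposeConvolution2dEndToEndFloat32NchwTest")
{
    // Float32 data with Float32 biases; a quantized variant would pass e.g.
    // DataType::QAsymmU8 with DataType::Signed32 biases instead.
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        std::vector<armnn::BackendId>{ armnn::Compute::CpuRef },
        armnn::DataLayout::NCHW);
}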