ArmNN 21.11
Convolution3dEndToEndTestImpl.hpp File Reference
#include "EndToEndTestImpl.hpp"
#include "QuantizeHelper.hpp"
#include <ResolveType.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <map>
#include <vector>


Functions

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void Convolution3dEndToEnd (const std::vector< armnn::BackendId > &backends, armnn::DataLayout dataLayout)
 

Function Documentation

◆ Convolution3dEndToEnd()

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void Convolution3dEndToEnd (const std::vector< armnn::BackendId > & backends,
                            armnn::DataLayout dataLayout)

Definition at line 49 of file Convolution3dEndToEndTestImpl.hpp.

References Convolution3dDescriptor::m_BiasEnabled, Convolution3dDescriptor::m_DataLayout, Convolution3dDescriptor::m_PadBack, Convolution3dDescriptor::m_PadBottom, Convolution3dDescriptor::m_PadFront, Convolution3dDescriptor::m_PadLeft, Convolution3dDescriptor::m_PadRight, Convolution3dDescriptor::m_PadTop, Convolution3dDescriptor::m_StrideX, Convolution3dDescriptor::m_StrideY, Convolution3dDescriptor::m_StrideZ, armnn::NCDHW, and PermuteTensorNdhwcToNcdhw().

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void Convolution3dEndToEnd(const std::vector<armnn::BackendId>& backends,
                           armnn::DataLayout dataLayout)
{
    using namespace armnn;
    using T = ResolveType<ArmnnType>;
    using BT = ResolveType<ArmnnBType>;

    const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
    const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;

    TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset, true);
    TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
    TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
    TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);

    std::vector<float> inputData =
    {
        0.0f, 1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
        15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f, 23.0f, 24.0f,

        25.0f, 26.0f, 27.0f, 28.0f, 29.0f,
        30.0f, 31.0f, 32.0f, 33.0f, 34.0f,
        35.0f, 36.0f, 37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f, 43.0f, 44.0f,
        45.0f, 46.0f, 47.0f, 48.0f, 49.0f,

        50.0f, 51.0f, 52.0f, 53.0f, 54.0f,
        55.0f, 56.0f, 57.0f, 58.0f, 59.0f,
        60.0f, 61.0f, 62.0f, 63.0f, 64.0f,
        65.0f, 66.0f, 67.0f, 68.0f, 69.0f,
        70.0f, 71.0f, 72.0f, 73.0f, 74.0f,

        75.0f, 76.0f, 77.0f, 78.0f, 79.0f,
        80.0f, 81.0f, 82.0f, 83.0f, 84.0f,
        85.0f, 86.0f, 87.0f, 88.0f, 89.0f,
        90.0f, 91.0f, 92.0f, 93.0f, 94.0f,
        95.0f, 96.0f, 97.0f, 98.0f, 99.0f,

        100.0f, 101.0f, 102.0f, 103.0f, 104.0f,
        105.0f, 106.0f, 107.0f, 108.0f, 109.0f,
        110.0f, 111.0f, 112.0f, 113.0f, 114.0f,
        115.0f, 116.0f, 117.0f, 118.0f, 119.0f,
        120.0f, 121.0f, 122.0f, 123.0f, 124.0f
    };

    std::vector<float> weightsData =
    {
        1.0f, 1.0f, 1.0f,
        1.0f, 1.0f, 1.0f,
        1.0f, 1.0f, 1.0f,

        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,

        1.0f, 1.0f, 1.0f,
        1.0f, 1.0f, 1.0f,
        1.0f, 1.0f, 1.0f,
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        559.0f, 595.0f,

        739.0f, 775.0f,

        1459.0f, 1495.0f,

        1639.0f, 1675.0f,
    };

    Convolution3dDescriptor descriptor;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_PadFront = 0;
    descriptor.m_PadBack = 0;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_StrideZ = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = dataLayout;

    // Permute input and output if NCDHW.
    if (dataLayout == DataLayout::NCDHW)
    {
        PermuteTensorNdhwcToNcdhw(inputInfo, inputData);
        PermuteTensorNdhwcToNcdhw(outputInfo, expectedOutputData);
    }

    // Quantize data
    std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
    std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);

    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);

    ConstTensor weights(weightsInfo, qWeightsData);
    ConstTensor biases(biasesInfo, qBiasesData);

    INetworkPtr network = CreateConvolution3dNetwork(descriptor,
                                                     inputInfo,
                                                     weightsInfo,
                                                     biasesInfo,
                                                     outputInfo,
                                                     weights,
                                                     biases);

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                { { 0, qInputData } },
                                                { { 0, qExpectedOutputData } },
                                                backends);
}
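The hard-coded expected outputs follow from the data above: the 3x3x3 kernel is all ones in its front and back depth slices and all zeros in the middle slice, so each output element is the sum of the front and back 3x3 patches of its window plus the bias of 1. A minimal standalone sketch (not part of the test file) reproducing the first expected value, 559.0f, using the fact that the input value at coordinate (d, h, w) is d*25 + h*5 + w:

#include <iostream>

int main()
{
    float acc = 0.0f;
    for (int d = 0; d < 3; d += 2)       // skip d == 1: the middle kernel slice is all zeros
    {
        for (int h = 0; h < 3; ++h)
        {
            for (int w = 0; w < 3; ++w)
            {
                acc += static_cast<float>(d * 25 + h * 5 + w);   // front patch sums to 54, back patch to 504
            }
        }
    }
    acc += 1.0f;                         // bias
    std::cout << acc << "\n";            // prints 559, matching expectedOutputData[0]
}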
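A typical caller is a backend's end-to-end test suite, which instantiates the template for a concrete data type and layout. A minimal sketch, assuming the doctest framework used by the ArmNN backend tests and that this header and the relevant armnn headers are already included; the test name is hypothetical and the reference backend with Float32 data is just one possible instantiation:

TEST_CASE("RefConvolution3dFloat32EndToEndTest")   // hypothetical test name
{
    // ArmnnBType (the bias type) matches ArmnnType for non-quantized data.
    const std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(backends,
                                                                              armnn::DataLayout::NDHWC);
}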