ArmNN
 20.05
Conv2dTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Convolution2d3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Convolution2d2x3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x5Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x3Stride2x2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x3NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< uint8_t, 4 > SimpleConvolution2d3x5Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > SimpleConvolution2d3x3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > SimpleConvolution2d3x5QSymm16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > SimpleConvolution2d3x3QSymm16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution2dAsymmetricPaddingTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< uint8_t, 4 > Convolution1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< float, 4 > CompareConvolution2dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory)
 
LayerTestResult< uint8_t, 4 > Convolution2dPerAxisQuantTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution2d3x3Stride2x2BFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout &dataLayout)
 
LayerTestResult< float, 4 > Convolution2d3x3Stride2x2BFloat16SmallValueTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout &dataLayout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2d3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dMult4Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dMult2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
template<typename T >
LayerTestResult< T, 4 > CompareDepthwiseConvolution2dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthNhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthMul1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthMul64Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dAsymmetricTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dDepthMul1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > DepthwiseConvolution2dInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > DepthwiseConvolution2dDepthMul1Int16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dPerAxisQuantTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > CompareDepthwiseConvolution2dFloatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > CompareDepthwiseConvolution2dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::DataLayout layout)
 

Function Documentation
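The templated helpers above take the input/weight data type (ArmnnType) and the bias data type (ArmnnBType) as template parameters, with T resolving to the matching C++ type through armnn::ResolveType. A rough usage sketch follows; the reference backend factory, the null memory manager and the include paths are assumptions, not part of this header:

#include <reference/RefWorkloadFactory.hpp>
#include <backendsCommon/test/layerTests/Conv2dTestImpl.hpp>

void RunConv2dDilationTestOnRefBackend()
{
    // Assumption: the reference backend accepts a default-constructed factory
    // and a null memory manager for these layer tests.
    armnn::RefWorkloadFactory workloadFactory;
    armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager;

    // ArmnnType = Float32 input/weights, ArmnnBType = Float32 bias,
    // so T = armnn::ResolveType<armnn::DataType::Float32> = float.
    LayerTestResult<float, 4> result =
        Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, /*biasEnabled=*/false, armnn::DataLayout::NCHW);

    // result.output holds the backend output, result.outputExpected the reference values.
}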

◆ CompareConvolution2dTest()

LayerTestResult<float, 4> CompareConvolution2dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory 
)

Definition at line 3438 of file Conv2dTestImpl.cpp.

3442 {
3443  return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
3444  workloadFactory, memoryManager, refWorkloadFactory);
3445 }

◆ CompareDepthwiseConvolution2dFloatTest()

LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::DataLayout  layout 
)

Definition at line 3678 of file Conv2dTestImpl.cpp.

3683 {
3684  return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
3685  workloadFactory, memoryManager, refWorkloadFactory, layout);
3686 }

◆ CompareDepthwiseConvolution2dTest()

LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::DataLayout  layout 
)

◆ CompareDepthwiseConvolution2dUint8Test()

LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::DataLayout  layout 
)

Definition at line 3688 of file Conv2dTestImpl.cpp.

3693 {
3694  return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
3695  workloadFactory, memoryManager, refWorkloadFactory, layout);
3696 }

◆ Convolution1dTest()

LayerTestResult<float, 4> Convolution1dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled 
)

Definition at line 3328 of file Conv2dTestImpl.cpp.

3332 {
3333  return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3334  workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
3335 }

◆ Convolution1dUint8Test()

LayerTestResult<uint8_t, 4> Convolution1dUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled 
)

Definition at line 3337 of file Conv2dTestImpl.cpp.

3341 {
3342  return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3343  workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
3344 }

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test()

LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1212 of file Conv2dTestImpl.cpp.

1217 {
1218  armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
1219  std::vector<float> inputNoQuantizedValues =
1220  {
1221  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1222  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1223  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1224  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1225  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1226  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1227  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1228  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1229  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1230  1, 1, 1, 1, 1, 1, 1, 1, 1, 1
1231  };
1232 
1233  armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
1234  std::vector<float> kernelNoQuantizedValues =
1235  {
1236  1, 2,
1237  3, 4
1238  };
1239 
1240  // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
1241  // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
1242  // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
1243  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1244  std::vector<float> outputExpectedNoQuantizedValues =
1245  {
1246  4, 7, 7, 3,
1247  6, 10, 10, 4,
1248  6, 10, 10, 4,
1249  2, 3, 3, 1
1250  };
1251  uint32_t padLeft = 1;
1252  uint32_t padTop = 1;
1253  uint32_t padRight = 1;
1254  uint32_t padBottom = 1;
1255 
1256  return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1257  workloadFactory,
1258  memoryManager,
1259  inputNoQuantizedValues,
1260  inputTensorInfo,
1261  kernelNoQuantizedValues,
1262  kernelTensorInfo,
1263  outputExpectedNoQuantizedValues,
1264  outputTensorInfo,
1265  2,
1266  2,
1267  layout,
1268  padLeft,
1269  padTop,
1270  padRight,
1271  padBottom,
1272  3,
1273  3,
1274  biasEnabled
1275  );
1276 }
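The shape arithmetic quoted in the comment above can be checked in isolation. A standalone sketch using this test's values (plain integer math, no Arm NN calls):

#include <cstdio>

int main()
{
    const int I = 10, K = 2, d = 2, P = 2, S = 3;
    const int dilatedK = d * (K - 1) + 1;          // 2*(2-1)+1 = 3
    const int O = (I - dilatedK + 2 * P) / S + 1;  // (10-3+4)/3+1 = 3+1 = 4
    std::printf("dilated kernel %dx%d -> output %dx%d\n", dilatedK, dilatedK, O, O);
    return 0;
}

The same formula with dilation 3, no padding and stride 1 gives the 4x4 outputs of the Dilation3x3 tests that follow: (10 - 7 + 0)/1 + 1 = 4.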

◆ Convolution2d2x3x3Dilation3x3Test()

LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1141 of file Conv2dTestImpl.cpp.

1146 {
1147  armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
1148  std::vector<float> inputNoQuantizedValues =
1149  {
1150  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1151  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1152  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1153  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1154  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1155  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1156  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1157  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1158  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1159  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1160 
1161  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1162  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1163  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1164  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1165  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1166  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1167  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1168  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1169  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1170  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1171  };
1172 
1173  armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
1174  std::vector<float> kernelNoQuantizedValues =
1175  {
1176  1, 2, 3,
1177  4, 5, 6,
1178  7, 8, 9,
1179 
1180  1, 2, 3,
1181  4, 5, 6,
1182  7, 8, 9
1183  };
1184 
1185  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1186  // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1187  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1188  std::vector<float> outputExpectedNoQuantizedValues =
1189  {
1190  12., 10., 10., 10.,
1191  12., 10., 10., 10.,
1192  12., 10., 10., 10.,
1193  6., 4., 4., 4.
1194  };
1195 
1196  return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1197  workloadFactory,
1198  memoryManager,
1199  inputNoQuantizedValues,
1200  inputTensorInfo,
1201  kernelNoQuantizedValues,
1202  kernelTensorInfo,
1203  outputExpectedNoQuantizedValues,
1204  outputTensorInfo,
1205  3,
1206  3,
1207  layout,
1208  biasEnabled);
1209 }

◆ Convolution2d3x3Dilation3x3Test()

LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1085 of file Conv2dTestImpl.cpp.

1090 {
1091  armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
1092  std::vector<float> inputNoQuantizedValues =
1093  {
1094  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1095  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1096  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1097  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1098  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1099  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1100  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1101  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1102  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1103  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1104  };
1105 
1106  armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1107  std::vector<float> kernelNoQuantizedValues =
1108  {
1109  1, 2, 3,
1110  4, 5, 6,
1111  7, 8, 9
1112  };
1113 
1114  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1115  // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1116  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1117  std::vector<float> outputExpectedNoQuantizedValues =
1118  {
1119  6., 5., 5., 5.,
1120  6., 5., 5., 5.,
1121  6., 5., 5., 5.,
1122  3., 2., 2., 2.
1123  };
1124 
1125  return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1126  workloadFactory,
1127  memoryManager,
1128  inputNoQuantizedValues,
1129  inputTensorInfo,
1130  kernelNoQuantizedValues,
1131  kernelTensorInfo,
1132  outputExpectedNoQuantizedValues,
1133  outputTensorInfo,
1134  3,
1135  3,
1136  layout,
1137  biasEnabled);
1138 }

◆ Convolution2d3x3Stride2x2BFloat16SmallValueTest()

LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout & dataLayout 
)

Definition at line 1485 of file Conv2dTestImpl.cpp.

References armnn::BFloat16, armnn::Float32, armnn::IgnoreUnused(), and SimpleConvolution2dNhwcTestImpl().

1490 {
1491  // BFloat16 input and weight, Float32 output
1492  armnn::IgnoreUnused(biasEnabled);
1493 
1494  // Input is a single-batch, 1 channel, 5x5 image.
1495  armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);
1496 
1497  std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1498  {
1499  0.0367984f, // 0.0368652
1500  0.0380895f, // 0.0380859
1501  0.0420157f, // 0.0419922
1502  0.0675631f, // 0.0673828
1503  0.0938920f, // 0.09375
1504  0.0476106f, // 0.0476074
1505  0.1035490f, // 0.103516
1506  0.1260370f, // 0.125977
1507  0.0461647f, // 0.0461426
1508  0.0883828f, // 0.0883789
1509  0.1159540f, // 0.115723
1510  0.0498519f, // 0.0498047
1511  0.0104630f, // 0.010437
1512  0.0154114f, // 0.0154419
1513  0.00137681f, // 0.00137329
1514  0.0344238f, // 0.0344616
1515  0.0356445f, // 0.0355693
1516  0.0495605f, // 0.0495018
1517  0.0683594f, // 0.0683308
1518  0.0991211f, // 0.0988837
1519  0.0461426f, // 0.0461838
1520  0.0996094f, // 0.0997546
1521  0.1269530f, // 0.127099
1522  0.0393066f, // 0.0392791
1523  0.103516f // 0.103641
1524  },
1525  1.0f, 0);
1526 
1527  auto input = MakeTensor<armnn::BFloat16, 4>(inputDesc, inputValues);
1528 
1529  // Use a 3x3 kernel.
1530  armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1531 
1532  std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1533  {
1534  -0.126184f, // -0.125977
1535  -0.150468f, // -0.150391
1536  -0.101412f, // -0.101562
1537  -0.0586369f,// -0.0585938
1538  -0.0865864f,// -0.0864258
1539  -0.0435089f,// -0.043457
1540  0.0347555f, // 0.034668
1541  0.0323111f, // 0.0322266
1542  0.0385381f // 0.0385742
1543  },
1544  1.0f, 0);
1545 
1546  auto kernel = MakeTensor<armnn::BFloat16, 4>(kernelDesc, kernelValues);
1547 
1548  // Expected output is a single-batch, 1 channel, 3x3 image.
1549  armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
1550 
1551  // Expected output (with results if calculated as FP32 in the comments)
1552  const std::vector<float> outputData =
1553  {
1554  0.000686645508f, // 0.000685
1555  0.000640869141f, // 0.000639
1556  -0.00759887695f, // -0.007631
1557  -0.02734375f, // -0.027388
1558  -0.0356445312f, // -0.035737
1559  -0.0145874023f, // -0.014568
1560  -0.0170898438f, // -0.017124
1561  -0.0373535156f, // -0.037431
1562  -0.0346679688f // -0.034808
1563  };
1564 
1565  boost::multi_array<float, 4> expectedOutput = MakeTensor<float, 4>(outputDesc, outputData);
1566 
1567  uint32_t padLeft = 1;
1568  uint32_t padTop = 1;
1569  uint32_t padRight = 1;
1570  uint32_t padBottom = 1;
1571  uint32_t strideX = 2;
1572  uint32_t strideY = 2;
1573 
1576  workloadFactory,
1577  memoryManager,
1578  input,
1579  kernel,
1580  boost::multi_array<float, 1>(),
1581  expectedOutput,
1582  dataLayout,
1583  1.0f,
1584  0,
1585  padLeft,
1586  padTop,
1587  padRight,
1588  padBottom,
1589  strideX,
1590  strideY);
1591 }
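The paired comments in the input and kernel data above show each literal after conversion to BFloat16. A small standalone illustration of where that precision loss comes from (this sketch truncates the low mantissa bits; the commented values in the test match a round-to-nearest conversion, which can land on the neighbour above the truncated one):

#include <cstdint>
#include <cstdio>
#include <cstring>

// BFloat16 keeps the sign, the 8 exponent bits and only the top 7 mantissa
// bits of an IEEE-754 float32, so roughly two to three significant decimal
// digits survive the conversion.
float DropLowMantissaBits(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits &= 0xFFFF0000u;   // keep only the 16 bits a BFloat16 would store
    std::memcpy(&value, &bits, sizeof(value));
    return value;
}

int main()
{
    // Truncation gives 0.0366211; the test's comment shows 0.0368652 because
    // round-to-nearest selects the other BFloat16 neighbour.
    std::printf("%.7f -> %.7f\n", 0.0367984f, DropLowMantissaBits(0.0367984f));
    return 0;
}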

◆ Convolution2d3x3Stride2x2BFloat16Test()

LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout & dataLayout 
)

Definition at line 1377 of file Conv2dTestImpl.cpp.

References armnn::BFloat16, armnn::Float32, armnn::IgnoreUnused(), and SimpleConvolution2dNhwcTestImpl().

1382 {
1383  // BFloat16 input and weight, Float32 output
1384  armnn::IgnoreUnused(biasEnabled);
1385 
1386  // Input is a single-batch, 1 channel, 5x5 image.
1387  armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);
1388 
1389  std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1390  {
1391  10.0367984f, // 10.0625
1392  2.0380895f, // 2.03125
1393  15.0420157f, // 15.0625
1394  22.0675631f, // 22.125
1395  8.0938920f, // 8.125
1396  5.0476106f, // 5.0625
1397  80.1035490f, // 80
1398  100.1260370f, // 100
1399  55.0461647f, // 55
1400  120.0883828f, // 120
1401  9.1159540f, // 9.125
1402  90.0498519f, // 90
1403  200.0104630f, // 200
1404  30.0154114f, // 30
1405  75.00137681f, // 75
1406  30.0344238f, // 30
1407  25.0356445f, // 25
1408  130.0495605f, // 130
1409  60.0683594f, // 60
1410  35.0991211f, // 35
1411  8.0461426f, // 8.0625
1412  12.0996094f, // 12.125
1413  98.1269530f, // 98
1414  125.0393066f, // 125
1415  5.103516f // 5.0937
1416  },
1417  1.0f, 0);
1418 
1419  auto input = MakeTensor<armnn::BFloat16, 4>(inputDesc, inputValues);
1420 
1421  // Use a 3x3 kernel.
1422  armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1423 
1424  std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1425  {
1426  -0.126184f, // -0.125977
1427  -0.150468f, // -0.150391
1428  -0.101412f, // -0.101562
1429  -0.0586369f,// -0.0585938
1430  -0.0865864f,// -0.0864258
1431  -0.0435089f,// -0.043457
1432  0.0347555f, // 0.034668
1433  0.0323111f, // 0.0322266
1434  0.0385381f // 0.0385742
1435  },
1436  1.0f, 0);
1437 
1438  auto kernel = MakeTensor<armnn::BFloat16, 4>(kernelDesc, kernelValues);
1439 
1440  // Expected output is a single-batch, 1 channel, 3x3 image.
1441  armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
1442 
1443  // Expected output (with results if calculated as FP32 in the comments)
1444  const std::vector<float> outputData =
1445  {
1446  2.296875f, // 2.29240716
1447  5.75f, // 5.75851926
1448  3.78125f, // 3.79855026
1449  -11.625f, // -11.65498118
1450  -47.25f, // -47.27316893
1451  -30.0f, // -30.04771684
1452  -8.25f, // -8.28126168
1453  -43.5f, // -43.46531337
1454  -20.625f // -20.63477281
1455  };
1456 
1457  boost::multi_array<float, 4> expectedOutput = MakeTensor<float, 4>(outputDesc, outputData);
1458 
1459  uint32_t padLeft = 1;
1460  uint32_t padTop = 1;
1461  uint32_t padRight = 1;
1462  uint32_t padBottom = 1;
1463  uint32_t strideX = 2;
1464  uint32_t strideY = 2;
1465 
1468  workloadFactory,
1469  memoryManager,
1470  input,
1471  kernel,
1472  boost::multi_array<float, 1>(),
1473  expectedOutput,
1474  dataLayout,
1475  1.0f,
1476  0,
1477  padLeft,
1478  padTop,
1479  padRight,
1480  padBottom,
1481  strideX,
1482  strideY);
1483 }

◆ Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest()

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::DataLayout  layout 
)

Definition at line 3318 of file Conv2dTestImpl.cpp.

References Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(), and armnn::Float32.

3322 {
3323  return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
3324  <armnn::DataType::Float32, armnn::DataType::Float32>(
3325  workloadFactory, memoryManager, layout, 0.0f, 0);
3326 }

◆ Convolution2dAsymmetricPaddingTest()

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::DataLayout  layout 
)

Definition at line 3309 of file Conv2dTestImpl.cpp.

3313 {
3314  return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3315  workloadFactory, memoryManager, layout, 0.0f, 0);
3316 }

◆ Convolution2dPerAxisQuantTest()

LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::DataLayout  layout 
)

Definition at line 3346 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConvolution2d(), IWorkloadFactory::CreateTensorHandle(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, armnn::NCHW, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and PermuteTensorNhwcToNchw().

3350 {
3351  using namespace armnn;
3352 
3353  const DataType inputType = DataType::QAsymmU8;
3354  const DataType kernelType = DataType::QSymmS8;
3355  const DataType biasType = DataType::Signed32;
3356 
3357  TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
3358  TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);
3359 
3360  const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
3361  constexpr unsigned int quantDimension = 0;
3362 
3363  TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
3364 
3365  const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
3366  TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
3367 
3368  std::vector<uint8_t> inputData =
3369  {
3370  138, 108, 138, 108, 138, 108
3371  };
3372 
3373  std::vector<int8_t> kernelData =
3374  {
3375  1, 2, 1, 2, 1, 2
3376  };
3377 
3378  std::vector<int32_t> biasData =
3379  {
3380  4, 4, 4
3381  };
3382 
3383  std::vector<uint8_t> expectedOutputData =
3384  {
3385  121, 118, 115, 121, 118, 115, 121, 118, 115
3386  };
3387 
3388  if (layout == DataLayout::NCHW)
3389  {
3390  PermuteTensorNhwcToNchw(inputInfo, inputData);
3391  PermuteTensorNhwcToNchw(kernelInfo, kernelData);
3392  PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3393  }
3394 
3395  Convolution2dDescriptor descriptor;
3396  descriptor.m_StrideX = 1;
3397  descriptor.m_StrideY = 1;
3398  descriptor.m_PadLeft = 0;
3399  descriptor.m_PadRight = 0;
3400  descriptor.m_PadTop = 0;
3401  descriptor.m_PadBottom = 0;
3402  descriptor.m_BiasEnabled = true;
3403  descriptor.m_DataLayout = layout;
3404 
3405  std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
3406  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
3407 
3408  WorkloadInfo workloadInfo;
3409  ScopedCpuTensorHandle weightTensor(kernelInfo);
3410  ScopedCpuTensorHandle biasTensor(biasInfo);
3411 
3412  AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
3413  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
3414 
3415  Convolution2dQueueDescriptor queueDescriptor;
3416  queueDescriptor.m_Parameters = descriptor;
3417  queueDescriptor.m_Weight = &weightTensor;
3418  queueDescriptor.m_Bias = &biasTensor;
3419 
3420  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3421  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3422 
3423  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConvolution2d(queueDescriptor, workloadInfo);
3424  inputHandle->Allocate();
3425  outputHandle->Allocate();
3426 
3427  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3428 
3429  ExecuteWorkload(*workload, memoryManager);
3430 
3431  LayerTestResult<uint8_t, 4> ret(outputInfo);
3432  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
3433  ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
3434 
3435  return ret;
3436 }
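The expected output above can be cross-checked by hand: dequantise the input and the per-axis weights and biases, accumulate, then requantise with the output scale and offset. A standalone sketch of that arithmetic (rounding of the exact .5 case in channel 0 is assumed to be away from zero here):

#include <cmath>
#include <cstdio>

int main()
{
    const float inScale  = 0.5f;  const int inOffset  = 128;
    const float outScale = 1.0f;  const int outOffset = 128;
    const float wScale[3] = { 0.5f, 0.75f, 1.0f };    // per-output-channel kernel scales
    const float bScale[3] = { 0.25f, 0.375f, 0.5f };  // per-output-channel bias scales

    const float x0 = (138 - inOffset) * inScale;      //  5.0
    const float x1 = (108 - inOffset) * inScale;      // -10.0

    for (int c = 0; c < 3; ++c)
    {
        // every output channel uses the quantised weights { 1, 2 } and bias 4
        const float acc = x0 * 1 * wScale[c] + x1 * 2 * wScale[c] + 4 * bScale[c];
        const int   q   = static_cast<int>(std::lround(acc / outScale)) + outOffset;
        std::printf("channel %d: %.2f -> %d\n", c, acc, q);   // 121, 118, 115
    }
    return 0;
}

Each of the three spatial positions sees the same input pair, which is why the triple 121, 118, 115 repeats three times in expectedOutputData.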

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test()

LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2651 of file Conv2dTestImpl.cpp.

2656 {
2657  armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
2658  std::vector<float> inputNoQuantizedValues =
2659  {
2660  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2661  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2662  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2663  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2664  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2665  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2666  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2667  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2668  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2669  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2670 
2671  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2672  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2673  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2674  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2675  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2676  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2677  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2678  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2679  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2680  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2681  };
2682 
2683  armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
2684  std::vector<float> kernelNoQuantizedValues =
2685  {
2686  1, 2, 3,
2687  4, 5, 6,
2688  7, 8, 9,
2689 
2690  1, 2, 3,
2691  4, 5, 6,
2692  7, 8, 9
2693  };
2694 
2695  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2696  // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2697  armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
2698  std::vector<float> outputExpectedNoQuantizedValues =
2699  {
2700  6., 5., 5., 5.,
2701  6., 5., 5., 5.,
2702  6., 5., 5., 5.,
2703  3., 2., 2., 2.,
2704 
2705  6., 5., 5., 5.,
2706  6., 5., 5., 5.,
2707  6., 5., 5., 5.,
2708  3., 2., 2., 2.
2709  };
2710 
2711  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2712  workloadFactory,
2713  memoryManager,
2714  inputNoQuantizedValues,
2715  inputTensorInfo,
2716  kernelNoQuantizedValues,
2717  kernelTensorInfo,
2718  outputExpectedNoQuantizedValues,
2719  outputTensorInfo,
2720  3,
2721  3,
2722  layout,
2723  biasEnabled);
2724 }

◆ DepthwiseConvolution2d3x3Dilation3x3Test()

LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2595 of file Conv2dTestImpl.cpp.

2600 {
2601  armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
2602  std::vector<float> inputNoQuantizedValues =
2603  {
2604  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2605  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2606  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2607  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2608  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2609  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2610  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2611  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2612  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2613  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2614  };
2615 
2616  armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
2617  std::vector<float> kernelNoQuantizedValues =
2618  {
2619  1, 2, 3,
2620  4, 5, 6,
2621  7, 8, 9
2622  };
2623 
2624  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2625  // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2626  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
2627  std::vector<float> outputExpectedNoQuantizedValues =
2628  {
2629  6., 5., 5., 5.,
2630  6., 5., 5., 5.,
2631  6., 5., 5., 5.,
2632  3., 2., 2., 2.
2633  };
2634 
2635  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2636  workloadFactory,
2637  memoryManager,
2638  inputNoQuantizedValues,
2639  inputTensorInfo,
2640  kernelNoQuantizedValues,
2641  kernelTensorInfo,
2642  outputExpectedNoQuantizedValues,
2643  outputTensorInfo,
2644  3,
2645  3,
2646  layout,
2647  biasEnabled);
2648 }

◆ DepthwiseConvolution2dAsymmetricTest()

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3508 of file Conv2dTestImpl.cpp.

3513 {
3514  return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3515  workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
3516 }

◆ DepthwiseConvolution2dDepthMul1Int16Test()

LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3560 of file Conv2dTestImpl.cpp.

3565 {
3566  return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3567  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3568 }

◆ DepthwiseConvolution2dDepthMul1Test()

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3466 of file Conv2dTestImpl.cpp.

3471 {
3472  return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3473  workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
3474 }

◆ DepthwiseConvolution2dDepthMul1Uint8Test()

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3528 of file Conv2dTestImpl.cpp.

3533 {
3534  return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3535  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3536 }

◆ DepthwiseConvolution2dDepthMul64Test()

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 3476 of file Conv2dTestImpl.cpp.

References armnn::Float32, and armnn::NCHW.

3479 {
3480  armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
3481  auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
3482 
3483  std::vector<float> kernelData;
3484  std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
3485  for (unsigned int i = 0; i < 64; ++i)
3486  {
3487  kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
3488  }
3489  armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
3490  auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
3491 
3492  std::vector<float> expectedOutputData(64, 0.f);
3493  armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
3494  auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
3495 
3496  return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3497  workloadFactory,
3498  memoryManager,
3499  input,
3500  kernel,
3501  boost::multi_array<float, 1>(),
3502  expectedOutput,
3503  0.f,
3504  0,
3505  armnn::DataLayout::NCHW);
3506 }
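The all-zero expected output follows directly from the data: the 2x2 input is consumed in a single kernel position and every one of the 64 per-depth filters is { 1, -1, -1, 1 }. A one-line check:

#include <cstdio>

int main()
{
    const float input[4]  = { 1.f, 2.f, 3.f, 4.f };
    const float kernel[4] = { 1.f, -1.f, -1.f, 1.f };
    float acc = 0.f;
    for (int i = 0; i < 4; ++i)
    {
        acc += input[i] * kernel[i];   // 1 - 2 - 3 + 4
    }
    std::printf("%g\n", acc);          // 0, repeated for all 64 output channels
    return 0;
}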

◆ DepthwiseConvolution2dDepthNhwcTest()

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled 
)

Definition at line 3457 of file Conv2dTestImpl.cpp.

3461 {
3462  return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3463  workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
3464 }

◆ DepthwiseConvolution2dInt16Test()

LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3550 of file Conv2dTestImpl.cpp.

3555 {
3556  return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3557  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3558 }

◆ DepthwiseConvolution2dMult2Test()

LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2819 of file Conv2dTestImpl.cpp.

2824 {
2825  armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2826  std::vector<float> inputNoQuantizedValues =
2827  {
2828  10.0, 10.0, 10.0,
2829  10.0, 10.0, 10.0,
2830  10.0, 10.0, 10.0,
2831 
2832  21.0, 22.0, 23.0,
2833  24.0, 25.0, 26.0,
2834  27.0, 28.0, 29.0
2835  };
2836 
2837  armnn::TensorInfo kernelTensorInfo({ 2, 2, 2, 2}, ArmnnType);
2838 
2839  std::vector<float> kernelNoQuantizedValues =
2840  {
2841  0.25f, 0.25f,
2842  0.25f, 0.25f,
2843 
2844  0.2f , 0.0f,
2845  0.0f , 0.0f,
2846 
2847  0.0f , 0.0f,
2848  0.0f , 0.1f,
2849 
2850  0.0f , 0.3f,
2851  0.0f , 0.0f
2852 
2853  };
2854 
2855  armnn::TensorInfo outputTensorInfo({ 1, 4, 2, 2}, ArmnnType);
2856  std::vector<float> outputExpectedNoQuantizedValues =
2857  {
2858  10.f, 10.f,
2859  10.f, 10.f,
2860 
2861  1.f, 1.f,
2862  1.f, 1.f,
2863 
2864  4.2000003f, 4.4f,
2865  4.8f, 5.f,
2866 
2867  6.6000004f, 6.9f,
2868  7.5000005f, 7.8f
2869  };
2870 
2871 
2872  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2873  workloadFactory,
2874  memoryManager,
2875  inputNoQuantizedValues,
2876  inputTensorInfo,
2877  kernelNoQuantizedValues,
2878  kernelTensorInfo,
2879  outputExpectedNoQuantizedValues,
2880  outputTensorInfo,
2881  1,
2882  1,
2883  layout,
2884  biasEnabled);
2885 }
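The expected data above can be reproduced by hand: with a depth multiplier of 2, each 2x2 filter slides over the input channel it belongs to (stride 1, no padding), producing a 2x2 output. For example the last filter listed, { 0, 0.3, 0, 0 }, acts on the second input channel and only its top-right weight is non-zero, which yields the final block of the expected output:

#include <cstdio>

int main()
{
    const float channel1[3][3] = { { 21, 22, 23 },
                                   { 24, 25, 26 },
                                   { 27, 28, 29 } };
    for (int y = 0; y < 2; ++y)
    {
        for (int x = 0; x < 2; ++x)
        {
            // only the 0.3 weight (top-right of the 2x2 filter) contributes
            std::printf("%g ", 0.3f * channel1[y][x + 1]);   // 6.6 6.9 7.5 7.8
        }
    }
    std::printf("\n");
    return 0;
}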

◆ DepthwiseConvolution2dMult4Test()

LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2727 of file Conv2dTestImpl.cpp.

2732 {
2733  armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2734  std::vector<float> inputNoQuantizedValues =
2735  {
2736  10.0, 10.0, 10.0,
2737  10.0, 10.0, 10.0,
2738  10.0, 10.0, 10.0,
2739 
2740  21.0, 22.0, 23.0,
2741  24.0, 25.0, 26.0,
2742  27.0, 28.0, 29.0
2743  };
2744 
2745  armnn::TensorInfo kernelTensorInfo({ 4, 2, 2, 2}, ArmnnType);
2746 
2747  std::vector<float> kernelNoQuantizedValues =
2748  {
2749  0.25f, 0.25f,
2750  0.25f, 0.25f,
2751 
2752  0.25f, 0.25f,
2753  0.25f, 0.25f,
2754 
2755  0.0f , 0.0f,
2756  0.0f , 0.1f,
2757 
2758  0.0f , 0.0f,
2759  0.0f , 0.1f,
2760 
2761  0.2f , 0.0f,
2762  0.0f , 0.0f,
2763 
2764  0.2f , 0.0f,
2765  0.0f , 0.0f,
2766 
2767  0.0f , 0.3f,
2768  0.0f , 0.0f,
2769 
2770  0.0f , 0.3f,
2771  0.0f , 0.0f
2772  };
2773 
2774  armnn::TensorInfo outputTensorInfo({ 1, 8, 2, 2}, ArmnnType);
2775  std::vector<float> outputExpectedNoQuantizedValues =
2776  {
2777  10.f, 10.f,
2778  10.f, 10.f,
2779 
2780  1.f, 1.f,
2781  1.f, 1.f,
2782 
2783  2.f, 2.f,
2784  2.f, 2.f,
2785 
2786  3.f, 3.f,
2787  3.f, 3.f,
2788 
2789  23.f, 24.f,
2790  26.f, 27.f,
2791 
2792  2.5f, 2.6000001f,
2793  2.8f, 2.9f,
2794 
2795  4.2000003f, 4.4f,
2796  4.8f, 5.f,
2797 
2798  6.6000004f, 6.9f,
2799  7.5000005f, 7.8f
2800  };
2801 
2802 
2803  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2804  workloadFactory,
2805  memoryManager,
2806  inputNoQuantizedValues,
2807  inputTensorInfo,
2808  kernelNoQuantizedValues,
2809  kernelTensorInfo,
2810  outputExpectedNoQuantizedValues,
2811  outputTensorInfo,
2812  1,
2813  1,
2814  layout,
2815  biasEnabled);
2816 }

◆ DepthwiseConvolution2dPerAxisQuantTest()

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::DataLayout  layout 
)

Definition at line 3570 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), IWorkloadFactory::CreateTensorHandle(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, armnn::NCHW, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and PermuteTensorNhwcToNchw().

3574 {
3575  using namespace armnn;
3576 
3577  const DataType inputType = DataType::QAsymmU8;
3578  const DataType kernelType = DataType::QSymmS8;
3579  const DataType biasType = DataType::Signed32;
3580 
3581  TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128); // N H W C
3582  TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128); // N H W C
3583 
3584  const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
3585  const unsigned int quantDimension = 0;
3586  TensorInfo kernelInfo({ 2, 2, 2, 2 }, kernelType, quantScales, quantDimension); // M I H W
3587 
3588  const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3589  constexpr unsigned int biasQuantDimension = 0;
3590  TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
3591 
3592  std::vector<uint8_t> inputData =
3593  {
3594  129, 130,
3595  129, 130,
3596  129, 130,
3597  129, 130,
3598  129, 130,
3599  129, 130,
3600  129, 130,
3601  129, 130,
3602  129, 130
3603  };
3604 
3605  std::vector<int8_t> kernelData =
3606  {
3607  1, 1, 1, 1,
3608  1, 1, 1, 1,
3609  1, 1, 1, 1,
3610  1, 1, 1, 1
3611  };
3612 
3613  std::vector<int32_t> biasData =
3614  {
3615  4, 4, 4, 4
3616  };
3617 
3618  std::vector<uint8_t> expectedOutputData =
3619  {
3620  132, 130, 134, 131,
3621  132, 130, 134, 131,
3622  132, 130, 134, 131,
3623  132, 130, 134, 131
3624  };
3625 
3626  if (layout == DataLayout::NCHW)
3627  {
3628  PermuteTensorNhwcToNchw(inputInfo, inputData);
3629  PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3630  }
3631 
3632  DepthwiseConvolution2dDescriptor descriptor;
3633  descriptor.m_StrideX = 1;
3634  descriptor.m_StrideY = 1;
3635  descriptor.m_PadLeft = 0;
3636  descriptor.m_PadRight = 0;
3637  descriptor.m_PadTop = 0;
3638  descriptor.m_PadBottom = 0;
3639  descriptor.m_DilationX = 1;
3640  descriptor.m_DilationY = 1;
3641  descriptor.m_BiasEnabled = true;
3642  descriptor.m_DataLayout = layout;
3643 
3644  std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
3645  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
3646 
3647  WorkloadInfo workloadInfo;
3648  ScopedCpuTensorHandle weightTensor(kernelInfo);
3649  ScopedCpuTensorHandle biasTensor(biasInfo);
3650 
3651  AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
3652  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
3653 
3654  DepthwiseConvolution2dQueueDescriptor queueDescriptor;
3655  queueDescriptor.m_Parameters = descriptor;
3656  queueDescriptor.m_Weight = &weightTensor;
3657  queueDescriptor.m_Bias = &biasTensor;
3658 
3659  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3660  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3661 
3662  std::unique_ptr<IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(queueDescriptor, workloadInfo);
3663  inputHandle->Allocate();
3664  outputHandle->Allocate();
3665 
3666  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3667 
3668  ExecuteWorkload(*workload, memoryManager);
3669 
3670  LayerTestResult<uint8_t, 4> ret(outputInfo);
3671 
3672  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
3673  ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
3674 
3675  return ret;
3676 }

◆ DepthwiseConvolution2dTest()

LayerTestResult<float, 4> DepthwiseConvolution2dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3447 of file Conv2dTestImpl.cpp.

3452 {
3453  return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3454  workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
3455 }

◆ DepthwiseConvolution2dUint8Test()

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3518 of file Conv2dTestImpl.cpp.

3523 {
3524  return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3525  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3526 }

◆ SimpleConvolution2d3x3NhwcTest()

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled 
)

Definition at line 3250 of file Conv2dTestImpl.cpp.

References armnn::NHWC.

3254 {
3255  return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3256  workloadFactory,
3257  memoryManager,
3258  0.f,
3259  0,
3260  biasEnabled,
3261  armnn::DataLayout::NHWC);
3262 }

◆ SimpleConvolution2d3x3QSymm16Test()

LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3299 of file Conv2dTestImpl.cpp.

3304 {
3305  return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3306  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3307 }

◆ SimpleConvolution2d3x3Stride2x2Test()

LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3264 of file Conv2dTestImpl.cpp.

3269 {
3270  return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3271  workloadFactory,
3272  memoryManager,
3273  0.f,
3274  0,
3275  biasEnabled,
3276  layout);
3277 }

◆ SimpleConvolution2d3x3Test()

LayerTestResult<float, 4> SimpleConvolution2d3x3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3240 of file Conv2dTestImpl.cpp.

3245 {
3246  return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3247  workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
3248 }

◆ SimpleConvolution2d3x3Uint8Test()

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3279 of file Conv2dTestImpl.cpp.

3284 {
3285  return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3286  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3287 }

◆ SimpleConvolution2d3x5QSymm16Test()

LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3289 of file Conv2dTestImpl.cpp.

3294 {
3295  return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3296  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3297 }

◆ SimpleConvolution2d3x5Test()

LayerTestResult<float, 4> SimpleConvolution2d3x5Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3220 of file Conv2dTestImpl.cpp.

3225 {
3226  return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3227  workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
3228 }

◆ SimpleConvolution2d3x5Uint8Test()

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3230 of file Conv2dTestImpl.cpp.

3235 {
3236  return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3237  workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
3238 }

◆ SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest()

LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 3538 of file Conv2dTestImpl.cpp.

3541 {
3542  return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3543  workloadFactory,
3544  memoryManager,
3545  0.f,
3546  0,
3547  false);
3548 }