Diffstat (limited to 'src/backends')
-rwxr-xr-x  src/backends/backendsCommon/test/Conv2dTestImpl.hpp     | 34
-rwxr-xr-x  src/backends/backendsCommon/test/LayerTests.cpp         | 47
-rw-r--r--  src/backends/backendsCommon/test/Pooling2dTestImpl.hpp  | 21
3 files changed, 47 insertions(+), 55 deletions(-)
diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index 6685a8edd2..d137c8082a 100755
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -5,6 +5,7 @@
#pragma once
#include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
#include <string>
#include <armnn/ArmNN.hpp>
@@ -108,10 +109,12 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
// Note these tensors will use two (identical) batches.
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
- 2*outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo kernelDesc =
+ armnnUtils::GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
// Set quantization parameters if the requested type is a quantized type.
@@ -354,9 +357,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Creates the tensors.
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo kernelDesc =
+ armnnUtils::GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
// Set quantization parameters if the requested type is a quantized type.
@@ -483,9 +489,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
unsigned int outputChannels = kernelChannels;
unsigned int outputNum = inputNum;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
// Set quantization parameters if the requested type is a quantized type.
@@ -629,11 +637,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
unsigned int outputChannels = inputChannels * depthMultiplier;
unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(
inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(
outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(
+ armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo<T>(
depthMultiplier, inputChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
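For reference, a minimal usage sketch of the shared helper these hunks switch to, assuming armnnUtils::GetTensorInfo keeps the call shape visible above (the dimensions and element type below are illustrative, not taken from the tests):

#include "TensorUtils.hpp"
#include <armnn/ArmNN.hpp>

// Builds a TensorInfo whose shape ordering follows the requested layout;
// the template argument selects the element type, as at the call sites above.
const armnn::TensorInfo info =
    armnnUtils::GetTensorInfo<float>(1 /*batches*/, 2 /*channels*/,
                                     4 /*height*/, 4 /*width*/,
                                     armnn::DataLayout::NHWC);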
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index ecd09ca024..f10d14e942 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -4,6 +4,7 @@
//
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
@@ -68,24 +69,6 @@ static std::vector<float> ConvInput3x8x16({
// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});
-armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
- unsigned int numberOfChannels,
- unsigned int height,
- unsigned int width,
- const armnn::DataLayoutIndexed& dataLayout)
-{
- switch (dataLayout.GetDataLayout())
- {
- case armnn::DataLayout::NCHW:
- return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
- case armnn::DataLayout::NHWC:
- return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
- default:
- throw armnn::InvalidArgumentException("unknown data layout ["
- + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
- }
-}
-
// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
@@ -3859,8 +3842,8 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
@@ -3913,8 +3896,8 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
std::vector<float> inputData({
1.0f, 255.0f,
@@ -3979,8 +3962,8 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
@@ -4045,8 +4028,8 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
@@ -4109,8 +4092,8 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f,
@@ -4741,7 +4724,7 @@ LayerTestResult<float, 4> L2Normalization1dTest(
unsigned int width = 1;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
@@ -4810,7 +4793,7 @@ LayerTestResult<float, 4> L2Normalization2dTest(
unsigned int height = 1;
unsigned int width = 5;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
@@ -4855,7 +4838,7 @@ LayerTestResult<float, 4> L2Normalization3dTest(
unsigned int height = 4;
unsigned int width = 3;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
@@ -4920,7 +4903,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(
unsigned int height = 4;
unsigned int width = 3;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
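The file-local GetTestTensorShape removed above is replaced by armnnUtils::GetTensorShape from TensorUtils.hpp. A minimal sketch of the layout-dependent ordering, assuming the shared utility mirrors the deleted helper (the NCHW/NHWC cases are taken from the removed switch; the dimensions are illustrative):

#include "TensorUtils.hpp"
#include <armnn/Tensor.hpp>

// NCHW orders the shape {batches, channels, height, width};
// NHWC orders it {batches, height, width, channels}.
const armnn::TensorShape nchw = armnnUtils::GetTensorShape(1, 2, 4, 4, armnn::DataLayout::NCHW);
// -> TensorShape({1, 2, 4, 4})
const armnn::TensorShape nhwc = armnnUtils::GetTensorShape(1, 2, 4, 4, armnn::DataLayout::NHWC);
// -> TensorShape({1, 4, 4, 2})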
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
index 9050fc64a6..0f33ac01a5 100644
--- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -5,6 +5,7 @@
#pragma once
#include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
#include "QuantizeHelper.hpp"
@@ -50,10 +51,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
- inputWidth, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
- outputWidth, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
+ inputWidth, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
+ outputWidth, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -252,8 +253,8 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -321,8 +322,8 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -441,8 +442,8 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
std::vector<T> inputData(
QuantizedVector<T>(qScale, qOffset, {