From 483c811ea6fd0e7801aac1afd979ed02a649064b Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 1 Jun 2021 09:24:52 +0100
Subject: IVGCVSW-5962 Remove boost::multi_array

* Replaced all instances of boost::multi_array with flat vectors.
* Updated LayerTestResult struct with new member variables.
* Updated CompareTensor function to compare flat vectors and the shape.
* Removed MakeTensor function from TensorHelpers.hpp.
* Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
* Removed boost::array usage.
* Removed boost::extents usages.
* Removed boost::random usages.

Signed-off-by: Matthew Sloyan
Signed-off-by: Sadik Armagan
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
---
 .../test/layerTests/NormalizationTestImpl.cpp | 244 ++++++++++++++++-----
 1 file changed, 191 insertions(+), 53 deletions(-)

diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index b52dcd5303..153afd9cd7 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -44,16 +44,18 @@ LayerTestResult<float, 4> SimpleNormalizationTestImpl(
     auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
     auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
 
-    LayerTestResult<float, 4> ret(outputTensorInfo);
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+    std::vector<float> input =
+    {
         // Batch #0
         1.0f, 2.0f,
         3.0f, 4.0f,
         // Batch #1
         5.0f, 6.0f,
         7.0f, 8.0f
-    }));
+    };
+
+    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
 
     float alpha = 1.f;
     float beta = 1.f;
@@ -75,7 +77,7 @@ LayerTestResult<float, 4> SimpleNormalizationTestImpl(
     data.m_Parameters.m_K = kappa;
     data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
 
-    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
     armnn::NormalizationQueueDescriptor refData = data;
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -85,11 +87,11 @@ LayerTestResult<float, 4> SimpleNormalizationTestImpl(
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
 
     ExecuteWorkload(*workload, memoryManager);
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
 
     switch (normMethod)
     {
@@ -104,23 +106,34 @@ LayerTestResult<float, 4> SimpleNormalizationTestImpl(
                 // pow((kappa + (accumulatedScale * alpha)), beta)
                 // ...where accumulatedScale is the sum of every element squared.
                 float divisor[inputNum];
-                for(int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
+
+                float accumulatedScale1 = 0.0f;
+                for (size_t i = 0; i < input.size()/2; ++i)
+                {
+                    accumulatedScale1 += input[i]*input[i];
+                }
+
+                float accumulatedScale2 = 0.0f;
+                for (size_t i = input.size()/2; i < input.size(); ++i)
                 {
-                    float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
-                                             input[i][0][0][1]*input[i][0][0][1] +
-                                             input[i][0][1][0]*input[i][0][1][0] +
-                                             input[i][0][1][1]*input[i][0][1][1];
-                    divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
+                    accumulatedScale2 += input[i]*input[i];
                 }
-                ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
-                                                          std::vector<float>({input[0][0][0][0]/divisor[0],
-                                                                              input[0][0][0][1]/divisor[0],
-                                                                              input[0][0][1][0]/divisor[0],
-                                                                              input[0][0][1][1]/divisor[0],
-                                                                              input[1][0][0][0]/divisor[1],
-                                                                              input[1][0][0][1]/divisor[1],
-                                                                              input[1][0][1][0]/divisor[1],
-                                                                              input[1][0][1][1]/divisor[1]}));
+
+                divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
+                divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
+
+                std::vector<float> output;
+                unsigned int divisorIndex = 0;
+                for (size_t i = 0; i < input.size(); ++i)
+                {
+                    if (i == input.size()/2)
+                    {
+                        divisorIndex++;
+                    }
+                    output.emplace_back(input[i]/divisor[divisorIndex]);
+                }
+
+                expectedOutput = output;
                 break;
             }
             case armnn::NormalizationAlgorithmChannel::Across:
@@ -131,19 +144,14 @@ LayerTestResult<float, 4> SimpleNormalizationTestImpl(
                 // ...where adjacent channels means within half the normSize for the channel
                 // The test data has only one channel, so this is simplified below.
                 std::vector<float> outputVector;
-                for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
+
+                for (unsigned int i = 0; i < input.size(); ++i)
                 {
-                    for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
-                    {
-                        for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
-                        {
-                            float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
-                            float scale = powf((kappa + accumulatedScale * alpha), -beta);
-                            outputVector.push_back(input[n][0][h][w] * scale);
-                        }
-                    }
+                    float accumulatedScale = input[i]*input[i];
+                    float scale = powf((kappa + accumulatedScale * alpha), -beta);
+                    outputVector.push_back(input[i] * scale);
                 }
-                ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
+                expectedOutput = outputVector;
                 break;
             }
             default:
@@ -162,7 +170,10 @@ LayerTestResult<float, 4> SimpleNormalizationTestImpl(
         }
     }
 
-    return ret;
+    return LayerTestResult<float, 4>(actualOutput,
+                                     expectedOutput,
+                                     outputHandle->GetShape(),
+                                     outputTensorInfo.GetShape());
 }
 
 LayerTestResult<float, 4> SimpleNormalizationNhwcTestImpl(
@@ -188,16 +199,18 @@ LayerTestResult<float, 4> SimpleNormalizationNhwcTestImpl(
     auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
     auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
 
-    LayerTestResult<float, 4> ret(outputTensorInfo);
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+    std::vector<float> input =
+    {
         // Batch #0
         1.0f, 2.0f,
         3.0f, 4.0f,
         // Batch #1
         5.0f, 6.0f,
         7.0f, 8.0f
-    }));
+    };
+
+    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
 
     float alpha = 1.f;
     float beta = 1.f;
@@ -219,7 +232,7 @@ LayerTestResult<float, 4> SimpleNormalizationNhwcTestImpl(
     data.m_Parameters.m_K = kappa;
     data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
 
-    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
     armnn::NormalizationQueueDescriptor refData = data;
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -229,11 +242,11 @@ LayerTestResult<float, 4> SimpleNormalizationNhwcTestImpl(
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
 
     ExecuteWorkload(*workload, memoryManager);
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
 
     switch (normMethod)
     {
@@ -243,9 +256,8 @@ LayerTestResult<float, 4> SimpleNormalizationNhwcTestImpl(
         {
             case armnn::NormalizationAlgorithmChannel::Across:
             {
-                std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
-                                                   0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
-                ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
+                expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+                                   0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                 break;
             }
             default:
@@ -264,7 +276,10 @@ LayerTestResult<float, 4> SimpleNormalizationNhwcTestImpl(
         }
     }
 
-    return ret;
+    return LayerTestResult<float, 4>(actualOutput,
+                                     expectedOutput,
+                                     outputHandle->GetShape(),
+                                     outputTensorInfo.GetShape());
 }
 
 LayerTestResult<float, 4> CompareNormalizationTestImpl(
@@ -297,7 +312,10 @@ LayerTestResult<float, 4> CompareNormalizationTestImpl(
 
     LayerTestResult<float, 4> ret(outputTensorInfo);
 
-    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
+    auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);
+
+    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
 
     constexpr float alpha = 1.f;
     constexpr float beta = 1.f;
@@ -330,9 +348,9 @@ LayerTestResult<float, 4> CompareNormalizationTestImpl(
     armnn::BackendId backend = workloadFactory.GetBackendId();
     const size_t reasonIfUnsupportedMaxLen = 255;
     char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
-    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
-                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
-    if (!ret.supported)
+    ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
+                                                      reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+    if (!ret.m_Supported)
     {
         return ret;
     }
@@ -346,19 +364,125 @@ LayerTestResult<float, 4> CompareNormalizationTestImpl(
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
+    CopyDataToITensorHandle(inputHandleRef.get(), input.data());
 
     ExecuteWorkload(*workload, memoryManager);
 
     workloadRef->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
 
+    ret.m_ActualData = actualOutput;
+    ret.m_ExpectedData = expectedOutput;
     return ret;
 }
 
+LayerTestResult<float, 4> AcrossChannelNormalizationTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    armnn::NormalizationAlgorithmChannel normChannel,
+    armnn::NormalizationAlgorithmMethod normMethod)
+{
+    const unsigned int inputHeight = 1;
+    const unsigned int inputWidth = 2;
+    const unsigned int inputChannels = 3;
+    const unsigned int inputNum = 2;
+
+    unsigned int outputHeight = inputHeight;
+    unsigned int outputWidth = inputWidth;
+    unsigned int outputChannels = inputChannels;
+    unsigned int outputNum = inputNum;
+
+    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
+    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
+
+    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<float> input =
+    {
+        // Batch #0
+        -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
+        // Batch #1
+        -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
+    };
+
+    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
+
+    float alpha = 4.f;
+    float beta = 0.5f;
+    float kappa = 9.f;
+    uint32_t normSize = 5;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::NormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_NormChannelType = normChannel;
+    data.m_Parameters.m_NormMethodType = normMethod;
+    data.m_Parameters.m_NormSize = normSize;
+    data.m_Parameters.m_Alpha = alpha;
+    data.m_Parameters.m_Beta = beta;
+    data.m_Parameters.m_K = kappa;
+    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
+    armnn::NormalizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    switch (normMethod)
+    {
+        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
+        {
+            switch (normChannel)
+            {
+                case armnn::NormalizationAlgorithmChannel::Across:
+                {
+                    expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
+                                       -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
+                    break;
+                }
+                default:
+                {
+                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
+                                                        "only Across and Within are supported");
+                }
+            }
+            break;
+        }
+        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
+        default:
+        {
+            throw armnn::UnimplementedException("Unsupported normalisation method type, "
+                                                "only LocalBrightness is supported");
+        }
+    }
+
+    return LayerTestResult<float, 4>(actualOutput,
+                                     expectedOutput,
+                                     outputHandle->GetShape(),
+                                     outputTensorInfo.GetShape());
+}
+
 } // anonymous namespace
 
 LayerTestResult<float, 4> SimpleNormalizationAcrossTest(
@@ -405,3 +529,17 @@ LayerTestResult<float, 4> CompareNormalizationTest(
         workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
         normChannel, normMethod);
 }
+
+LayerTestResult<float, 4> AcrossChannelNormalizationTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
+    return AcrossChannelNormalizationTestImpl(workloadFactory,
+                                              memoryManager,
+                                              tensorHandleFactory,
+                                              normChannel,
+                                              normMethod);
+}
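
Note on the indexing change applied throughout this patch: the old boost::multi_array code reads elements as input[n][c][h][w], while the rewritten tests read a single flat std::vector<float>. The sketch below is illustrative only and not part of the patch; the helper names are hypothetical. It shows the equivalence for a dense row-major NCHW tensor, which is the layout these tests rely on when they replace input[n][0][h][w] with input[i].

#include <cstddef>
#include <vector>

// Hypothetical helper (not part of the patch): flat offset of element
// (n, c, h, w) in a dense row-major NCHW tensor of shape [N, C, H, W].
inline std::size_t FlatIndexNchw(std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                                 std::size_t C, std::size_t H, std::size_t W)
{
    return ((n * C + c) * H + h) * W + w;
}

// Example mirroring the Within-channel loops above: sum of squares over one
// batch. Because the layout is dense and row-major, batch n occupies a
// contiguous run of C*H*W elements, so the old input[n][0][h][w] reads
// become input[i] over a contiguous range.
inline float SumOfSquaresForBatch(const std::vector<float>& input, std::size_t n,
                                  std::size_t C, std::size_t H, std::size_t W)
{
    float accumulatedScale = 0.0f;
    for (std::size_t c = 0; c < C; ++c)
    {
        for (std::size_t h = 0; h < H; ++h)
        {
            for (std::size_t w = 0; w < W; ++w)
            {
                const float value = input[FlatIndexNchw(n, c, h, w, C, H, W)];
                accumulatedScale += value * value;
            }
        }
    }
    return accumulatedScale;
}

For the 2x1x2x2 inputs above, C = 1 and H = W = 2, so batch n starts at offset n * 4 and spans four consecutive elements; that is why the rewritten loops can simply split input.size() in half.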