From 483c811ea6fd0e7801aac1afd979ed02a649064b Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 1 Jun 2021 09:24:52 +0100
Subject: IVGCVSW-5962 Remove boost::multi_array

* Replaced all instances of boost::multi_array with flat vectors.
* Updated LayerTestResult struct with new member variables.
* Updated CompareTensor function to compare flat vectors and the shape.
* Removed MakeTensor function from TensorHelpers.hpp.
* Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
* Removed boost::array usage.
* Removed boost::extents usages.
* Removed boost::random usages.

Signed-off-by: Matthew Sloyan
Signed-off-by: Sadik Armagan
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
---
 .../test/layerTests/FakeQuantizationTestImpl.cpp | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 157df99d64..bbe481657d 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -22,16 +22,17 @@ LayerTestResult<float, 2> FakeQuantizationTest(
     constexpr unsigned int width = 2;
     constexpr unsigned int height = 3;

-    const armnn::TensorInfo tensorInfo({height, width },
-                                       armnn::DataType::Float32);
+    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);

-    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+    std::vector<float> input =
+    {
        -10.0f, -5.0f, 0.0f, 5.0f, 10.0f, 10.0f
-    }));
+    };

-    LayerTestResult<float, 2> ret(tensorInfo);
+    std::vector<float> actualOutput(tensorInfo.GetNumElements());
+    std::vector<float> expectedOutput(tensorInfo.GetNumElements());

     std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(tensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
@@ -48,7 +49,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
     data.m_Parameters.m_Min = min;
     data.m_Parameters.m_Max = max;

-    armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+    armnn::PassthroughTensorHandle refHandle(tensorInfo, expectedOutput.data());
     armnn::FakeQuantizationQueueDescriptor refData = data;
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
@@ -58,18 +59,22 @@ LayerTestResult<float, 2> FakeQuantizationTest(
     inputHandle->Allocate();
     outputHandle->Allocate();

-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());

     workload->PostAllocationConfigure();
     workload->Execute();

-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

-    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+    expectedOutput =
+    {
         0.0f, 63.0f, 128.0f, 191.0f, 255.0f, 255.0f
-    }));
+    };

-    return ret;
+    return LayerTestResult<float, 2>(actualOutput,
+                                     expectedOutput,
+                                     outputHandle->GetShape(),
+                                     tensorInfo.GetShape());
 }
-- 
cgit v1.2.1
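
Note: the pattern this patch adopts is "flat std::vector of data plus an explicit shape" in place of boost::multi_array. The sketch below is a simplified, hypothetical illustration of that idea; FlatTestResult and CompareFlatTensors are stand-in names, not the actual LayerTestResult and CompareTensors definitions from LayerTestResult.hpp and TensorHelpers.hpp (which this patch does not show, and which use armnn::TensorShape rather than a plain vector of dimensions).

#include <cassert>
#include <utility>
#include <vector>

// Hypothetical stand-in for the refactored test-result holder:
// the data is stored flat (1-D), so the shape must be kept alongside it.
template <typename T>
struct FlatTestResult
{
    FlatTestResult(std::vector<T> actual,
                   std::vector<T> expected,
                   std::vector<unsigned int> actualShape,
                   std::vector<unsigned int> expectedShape)
        : m_ActualData(std::move(actual))
        , m_ExpectedData(std::move(expected))
        , m_ActualShape(std::move(actualShape))
        , m_ExpectedShape(std::move(expectedShape))
    {}

    std::vector<T> m_ActualData;               // flat, row-major storage
    std::vector<T> m_ExpectedData;
    std::vector<unsigned int> m_ActualShape;   // shape carried separately,
    std::vector<unsigned int> m_ExpectedShape; // since the data is now 1-D
};

// A comparison helper in the spirit of the updated CompareTensors:
// once the multi-dimensional container is gone, it has to check both
// the shape and the flattened values. (Exact element-wise equality is
// used here for brevity; a real float comparison would use a tolerance.)
template <typename T>
bool CompareFlatTensors(const FlatTestResult<T>& result)
{
    return result.m_ActualShape == result.m_ExpectedShape &&
           result.m_ActualData  == result.m_ExpectedData;
}

int main()
{
    // A 3x2 tensor stored flat, matching the FakeQuantization test above.
    std::vector<float> actual   = { 0.0f, 63.0f, 128.0f, 191.0f, 255.0f, 255.0f };
    std::vector<float> expected = { 0.0f, 63.0f, 128.0f, 191.0f, 255.0f, 255.0f };

    FlatTestResult<float> result(actual, expected, { 3, 2 }, { 3, 2 });
    assert(CompareFlatTensors(result));
    return 0;
}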