aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2021-06-01 09:24:52 +0100
committerSadik Armagan <sadik.armagan@arm.com>2021-06-02 13:00:56 +0000
commit483c811ea6fd0e7801aac1afd979ed02a649064b (patch)
treea0969c8786528334b62043b40983fa21d54d524e /src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
parent31f86bfeb311ccc0c6ed94c35a78a51551148ea4 (diff)
downloadarmnn-483c811ea6fd0e7801aac1afd979ed02a649064b.tar.gz
IVGCVSW-5962 Remove boost::multi_array
* Replaced all instances of boost::multi_array with flat vectors.
* Updated LayerTestResult struct with new member variables.
* Updated CompareTensor function to compare flat vectors and the shape.
* Removed MakeTensor function from TensorHelpers.hpp.
* Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
* Removed boost::array usage.
* Removed boost::extents usages.
* Removed boost::random usages.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp')
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp | 27
1 files changed, 16 insertions, 11 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 157df99d64..bbe481657d 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -22,16 +22,17 @@ LayerTestResult<float, 2> FakeQuantizationTest(
constexpr unsigned int width = 2;
constexpr unsigned int height = 3;
- const armnn::TensorInfo tensorInfo({height, width },
- armnn::DataType::Float32);
+ const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
- auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+ std::vector<float> input =
+ {
-10.0f, -5.0f,
0.0f, 5.0f,
10.0f, 10.0f
- }));
+ };
- LayerTestResult<float, 2> ret(tensorInfo);
+ std::vector<float> actualOutput(tensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(tensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
@@ -48,7 +49,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
data.m_Parameters.m_Min = min;
data.m_Parameters.m_Max = max;
- armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+ armnn::PassthroughTensorHandle refHandle(tensorInfo, expectedOutput.data());
armnn::FakeQuantizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
@@ -58,18 +59,22 @@ LayerTestResult<float, 2> FakeQuantizationTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+ expectedOutput =
+ {
0.0f, 63.0f,
128.0f, 191.0f,
255.0f, 255.0f
- }));
+ };
- return ret;
+ return LayerTestResult<float, 2>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ tensorInfo.GetShape());
}