author    Sadik Armagan <sadik.armagan@arm.com>    2021-06-01 09:24:52 +0100
committer Sadik Armagan <sadik.armagan@arm.com>    2021-06-02 13:00:56 +0000
commit    483c811ea6fd0e7801aac1afd979ed02a649064b (patch)
tree      a0969c8786528334b62043b40983fa21d54d524e /src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
parent    31f86bfeb311ccc0c6ed94c35a78a51551148ea4 (diff)
download  armnn-483c811ea6fd0e7801aac1afd979ed02a649064b.tar.gz
IVGCVSW-5962 Remove boost::multi_array
* Replaced all instances of boost::multi_array with flat vectors.
* Updated LayerTestResult struct with new member variables.
* Updated CompareTensor function to compare flat vectors and the shape.
* Removed MakeTensor function from TensorHelpers.hpp.
* Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
* Removed boost::array usage.
* Removed boost::extents usages.
* Removed boost::random usages.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
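For illustration only, the pattern described above replaces a boost::multi_array built via MakeTensor with a flat std::vector whose shape is carried separately (in the tests, by the TensorInfo). The sketch below uses hypothetical names (FlatTensor, NumElements) and is not the actual LayerTestResult definition:

```cpp
// Illustrative sketch, not ArmNN code: a flat buffer plus an explicit shape
// takes the place of boost::multi_array / boost::extents.
#include <cassert>
#include <cstddef>
#include <vector>

struct FlatTensor                    // hypothetical stand-in for LayerTestResult storage
{
    std::vector<float>    data;      // flat, row-major element storage
    std::vector<unsigned> shape;     // e.g. {1, 3, 2, 3}
};

inline std::size_t NumElements(const std::vector<unsigned>& shape)
{
    std::size_t n = 1;
    for (unsigned d : shape)
    {
        n *= d;
    }
    return n;
}

int main()
{
    FlatTensor t;
    t.shape = {1, 3, 2, 3};
    t.data.assign(NumElements(t.shape), 0.0f);  // 18 elements, no boost::extents needed
    assert(t.data.size() == 18);
    return 0;
}
```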
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp | 28
1 file changed, 18 insertions, 10 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 5fbec56435..9946801aab 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -22,14 +22,19 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
- auto input = MakeTensor<float, 4>(inputTensorInfo,
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+ std::vector<float> input =
+ {
+ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ };
- LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
- { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
- 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+ std::vector<armnn::Half> expectedOutput =
+ {
+ -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+ 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h
+ };
+
+ std::vector<armnn::Half> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -44,11 +49,14 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<armnn::Half, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
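The commit message also notes that the comparison helper was updated to work on flat vectors plus a shape. A minimal sketch of that idea, assuming a simple exact-equality check (the real ArmNN helper uses tolerant floating-point comparison and armnn::TensorShape, neither of which is shown here):

```cpp
// Sketch only: compare two flat buffers after checking that their shapes match.
// CompareFlatTensors is a hypothetical name, not the ArmNN API.
#include <vector>

template <typename T>
bool CompareFlatTensors(const std::vector<T>& actual,
                        const std::vector<T>& expected,
                        const std::vector<unsigned>& actualShape,
                        const std::vector<unsigned>& expectedShape)
{
    // A shape mismatch fails the comparison before any element is inspected.
    if (actualShape != expectedShape)
    {
        return false;
    }
    return actual == expected;
}

int main()
{
    std::vector<float> actual   {-37.5f, 37.5f};
    std::vector<float> expected {-37.5f, 37.5f};
    bool same = CompareFlatTensors(actual, expected, {1, 2}, {1, 2});  // true: shapes and data match
    return same ? 0 : 1;
}
```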