From 0637bf38b24bba3a3d88f34ed956111a3abddda2 Mon Sep 17 00:00:00 2001
From: Cathal Corbett <cathal.corbett@arm.com>
Date: Tue, 20 Dec 2022 18:35:34 +0000
Subject: IVGCVSW-7211 Fix float16 operators being wrongly unsupported with
 android-nn-driver.

!armnn:8862

 * Added float16 mean test cases.
 * Float16 CTS/CTS pass on CpuAcc.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ibd9021d0ae4a205cc2c91555f3ae00c6dba84609
---
 test/1.2/Mean.cpp | 204 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 204 insertions(+)
 create mode 100644 test/1.2/Mean.cpp

diff --git a/test/1.2/Mean.cpp b/test/1.2/Mean.cpp
new file mode 100644
index 00000000..a2a8b7a1
--- /dev/null
+++ b/test/1.2/Mean.cpp
@@ -0,0 +1,204 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../DriverTestHelpers.hpp"
+#include "../TestHalfTensor.hpp"
+
+#include <1.2/HalPolicy.hpp>
+
+#include <array>
+
+using Half = half_float::half;
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+using HalPolicy = hal_1_2::HalPolicy;
+using RequestArgument = V1_0::RequestArgument;
+
+namespace
+{
+
+void MeanTestImpl(const TestHalfTensor& input,
+                  const hidl_vec<uint32_t>& axisDimensions,
+                  const int32_t* axisValues,
+                  int32_t keepDims,
+                  const TestHalfTensor& expectedOutput,
+                  bool fp16Enabled,
+                  armnn::Compute computeDevice)
+{
+    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice, fp16Enabled));
+
+    HalPolicy::Model model = {};
+
+    AddInputOperand<HalPolicy>(model, input.GetDimensions(), V1_2::OperandType::TENSOR_FLOAT16);
+
+    AddTensorOperand<HalPolicy>(model,
+                                axisDimensions,
+                                const_cast<int32_t*>(axisValues),
+                                HalPolicy::OperandType::TENSOR_INT32);
+
+    AddIntOperand<HalPolicy>(model, keepDims);
+
+    AddOutputOperand<HalPolicy>(model, expectedOutput.GetDimensions(), V1_2::OperandType::TENSOR_FLOAT16);
+
+    model.operations.resize(1);
+    model.operations[0].type    = HalPolicy::OperationType::MEAN;
+    model.operations[0].inputs  = hidl_vec<uint32_t>{ 0, 1, 2 };
+    model.operations[0].outputs = hidl_vec<uint32_t>{ 3 };
+    model.relaxComputationFloat32toFloat16 = fp16Enabled;
+
+    //android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
+    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, *driver);
+
+    // The request's memory pools will follow the same order as the inputs
+    V1_0::DataLocation inLoc = {};
+    inLoc.poolIndex          = 0;
+    inLoc.offset             = 0;
+    inLoc.length             = input.GetNumElements() * sizeof(Half);
+    RequestArgument inArg    = {};
+    inArg.location           = inLoc;
+    inArg.dimensions         = input.GetDimensions();
+
+    // An additional memory pool is needed for the output
+    V1_0::DataLocation outLoc = {};
+    outLoc.poolIndex          = 1;
+    outLoc.offset             = 0;
+    outLoc.length             = expectedOutput.GetNumElements() * sizeof(Half);
+    RequestArgument outArg    = {};
+    outArg.location           = outLoc;
+    outArg.dimensions         = expectedOutput.GetDimensions();
+
+    // Make the request based on the arguments
+    V1_0::Request request = {};
+    request.inputs  = hidl_vec<RequestArgument>{ inArg };
+    request.outputs = hidl_vec<RequestArgument>{ outArg };
+
+    // Set the input data
+    AddPoolAndSetData<Half>(input.GetNumElements(), request, input.GetData());
+
+    // Add memory for the output
+    android::sp<IMemory> outMemory = AddPoolAndGetData<Half>(expectedOutput.GetNumElements(), request);
+    const Half* outputData = static_cast<const Half*>(static_cast<void*>(outMemory->getPointer()));
+
+    if (preparedModel.get() != nullptr)
+    {
+        V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
+        DOCTEST_CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
+    }
+
+    const Half* expectedOutputData = expectedOutput.GetData();
+    for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
+    {
+        DOCTEST_CHECK(outputData[i] == expectedOutputData[i]);
+    }
+}
+
+} // anonymous namespace
+
+DOCTEST_TEST_SUITE("MeanTests_1.2_CpuRef")
+{
+
+DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_CpuRef")
+{
+    using namespace half_float::literal;
+
+    TestHalfTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0_h, 2.0_h, 3.0_h, 4.0_h, 5.0_h, 6.0_h, 7.0_h, 8.0_h, 9.0_h, 10.0_h,
+                            11.0_h, 12.0_h, 13.0_h, 14.0_h, 15.0_h, 16.0_h, 17.0_h, 18.0_h, 19.0_h,
+                            20.0_h, 21.0_h, 22.0_h, 23.0_h, 24.0_h } };
+    hidl_vec<uint32_t> axisDimensions = { 2 };
+    int32_t axisValues[] = { 0, 1 };
+    int32_t keepDims = 0;
+    TestHalfTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0_h, 13.0_h } };
+
+    MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_CpuRef")
+{
+    using namespace half_float::literal;
+
+    TestHalfTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0_h, 1.0_h, 2.0_h, 2.0_h, 3.0_h, 3.0_h } };
+    hidl_vec<uint32_t> axisDimensions = { 1 };
+    int32_t axisValues[] = { 2 };
+    int32_t keepDims = 1;
+    TestHalfTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0_h, 2.0_h } };
+
+    MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+}
+
+}
+
+#ifdef ARMCOMPUTECL_ENABLED
+DOCTEST_TEST_SUITE("MeanTests_1.2_CpuAcc")
+{
+    DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_CpuAcc")
+    {
+        using namespace half_float::literal;
+
+        std::vector<Half> in = { 1.0_h, 2.0_h, 3.0_h, 4.0_h, 5.0_h, 6.0_h, 7.0_h, 8.0_h, 9.0_h, 10.0_h,
+                                 11.0_h, 12.0_h, 13.0_h, 14.0_h, 15.0_h, 16.0_h, 17.0_h, 18.0_h, 19.0_h,
+                                 20.0_h, 21.0_h, 22.0_h, 23.0_h, 24.0_h };
+        TestHalfTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                              in};
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        std::vector<Half> out = { 12.0_h, 13.0_h };
+        TestHalfTensor expectedOutput{ armnn::TensorShape{ 2 }, out };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+    }
+
+    DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_CpuAcc")
+    {
+        using namespace half_float::literal;
+
+        std::vector<Half> in = { 1.0_h, 1.0_h, 2.0_h, 2.0_h, 3.0_h, 3.0_h };
+        TestHalfTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, in };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        std::vector<Half> out = { 2.0_h, 2.0_h };
+        TestHalfTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, out };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+    }
+}
+
+DOCTEST_TEST_SUITE("MeanTests_1.2_GpuAcc")
+{
+    DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_GpuAcc")
+    {
+        using namespace half_float::literal;
+
+        TestHalfTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                              { 1.0_h, 2.0_h, 3.0_h, 4.0_h, 5.0_h, 6.0_h, 7.0_h, 8.0_h, 9.0_h, 10.0_h,
+                                11.0_h, 12.0_h, 13.0_h, 14.0_h, 15.0_h, 16.0_h, 17.0_h, 18.0_h, 19.0_h,
+                                20.0_h, 21.0_h, 22.0_h, 23.0_h, 24.0_h } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestHalfTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0_h, 13.0_h } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+    }
+
+    DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_GpuAcc")
+    {
+        using namespace half_float::literal;
+
+        TestHalfTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0_h, 1.0_h, 2.0_h, 2.0_h, 3.0_h, 3.0_h } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestHalfTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0_h, 2.0_h } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+    }
+}
+#endif
--
cgit v1.2.1