From 77605826a353981d41f0ee346850d411770535f8 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Mon, 3 Sep 2018 11:25:56 +0100
Subject: IVGCVSW-1713 Create a minimum unit test to compare the results
 before and after passing the FP16 flag in the Android-nn-driver

Change-Id: If8d4ca12421c3bee2526eec98f11d393af822373
---
 test/Convolution2D.hpp | 129 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)
 create mode 100644 test/Convolution2D.hpp

diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
new file mode 100644
index 00000000..ff417d96
--- /dev/null
+++ b/test/Convolution2D.hpp
@@ -0,0 +1,129 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DriverTestHelpers.hpp"
+
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+#include <OperationsUtils.h>
+
+BOOST_AUTO_TEST_SUITE(Convolution2DTests)
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+namespace driverTestHelpers
+{
+
+void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);
+
+#ifdef ARMNN_ANDROID_NN_V1_1
+void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled);
+#endif
+
+template<typename HalPolicy>
+void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled = false)
+{
+    using HalModel         = typename HalPolicy::Model;
+    using HalOperationType = typename HalPolicy::OperationType;
+
+    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc, fp16Enabled));
+    HalModel model = {};
+
+    uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1;
+
+    // add operands
+    float weightValue[] = {1.f, -1.f, 0.f, 1.f};
+    float biasValue[]   = {0.f};
+
+    AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 3, 1});
+    AddTensorOperand(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
+    AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
+    AddIntOperand(model, (int32_t)paddingScheme); // padding
+    AddIntOperand(model, 2); // stride x
+    AddIntOperand(model, 2); // stride y
+    AddIntOperand(model, 0); // no activation
+    AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
+
+    // make the convolution operation
+    model.operations.resize(1);
+    model.operations[0].type    = HalOperationType::CONV_2D;
+    model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
+    model.operations[0].outputs = hidl_vec<uint32_t>{7};
+
+    // make the prepared model
+    SetModelFp16Flag(model, fp16Enabled);
+    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+
+    // construct the request
+    DataLocation inloc    = {};
+    inloc.poolIndex       = 0;
+    inloc.offset          = 0;
+    inloc.length          = 6 * sizeof(float);
+    RequestArgument input = {};
+    input.location        = inloc;
+    input.dimensions      = hidl_vec<uint32_t>{};
+
+    DataLocation outloc    = {};
+    outloc.poolIndex       = 1;
+    outloc.offset          = 0;
+    outloc.length          = outSize * sizeof(float);
+    RequestArgument output = {};
+    output.location        = outloc;
+    output.dimensions      = hidl_vec<uint32_t>{};
+
+    Request request = {};
+    request.inputs  = hidl_vec<RequestArgument>{input};
+    request.outputs = hidl_vec<RequestArgument>{output};
+
+    // set the input data (matching source test)
+    float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1, -1024.25f};
+    AddPoolAndSetData(6, request, indata);
+
+    // add memory for the output
+    android::sp<IMemory> outMemory = AddPoolAndGetData(outSize, request);
+    float* outdata = reinterpret_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+    // run the execution
+    Execute(preparedModel, request);
+
+    // check the result
+    switch (paddingScheme)
+    {
+    case android::nn::kPaddingValid:
+        if (fp16Enabled)
+        {
+            BOOST_TEST(outdata[0] == 1022.f);
+        }
+        else
+        {
+            BOOST_TEST(outdata[0] == 1022.25f);
+        }
+        break;
+    case android::nn::kPaddingSame:
+        if (fp16Enabled)
+        {
+            BOOST_TEST(outdata[0] == 1022.f);
+            BOOST_TEST(outdata[1] == 0.f);
+        }
+        else
+        {
+            BOOST_TEST(outdata[0] == 1022.25f);
+            BOOST_TEST(outdata[1] == 0.f);
+        }
+        break;
+    default:
+        BOOST_TEST(false);
+        break;
+    }
+}
+
+} // namespace driverTestHelpers
+
+BOOST_AUTO_TEST_SUITE_END()
-- 
cgit v1.2.1
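The patch adds only the templated helper; the BOOST_AUTO_TEST_CASE functions that invoke it live in a separate source file that is not part of this change. Below is a minimal sketch of how PaddingTestImpl might be driven; the HalPolicy header path, the hal_1_0::HalPolicy type, and the test case names are assumptions for illustration only, not taken from this patch.

    // Hypothetical caller of the helper added above. Include path, HalPolicy
    // type, and test names are assumptions, not part of this patch.
    #include "Convolution2D.hpp"

    #include "../1.0/HalPolicy.hpp" // assumed location of the V1.0 HalPolicy

    BOOST_AUTO_TEST_SUITE(Convolution2DTests)

    // Run the convolution with the FP16 flag off and on; the helper checks the
    // FP32 expectation (1022.25f) or the FP16 expectation (1022.f) accordingly.
    BOOST_AUTO_TEST_CASE(ConvValidPaddingFp32)
    {
        PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingValid, false);
    }

    BOOST_AUTO_TEST_CASE(ConvValidPaddingFp16Flag)
    {
        PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingValid, true);
    }

    BOOST_AUTO_TEST_CASE(ConvSamePaddingFp16Flag)
    {
        PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingSame, true);
    }

    BOOST_AUTO_TEST_SUITE_END()

The two expected values in the switch follow from half-precision rounding: the first input, 1024.25f, is not representable in FP16 (representable values are 1.0 apart in the range [1024, 2048)), so with the flag enabled it is effectively 1024.0f and the valid-padding convolution yields 1022.f rather than the FP32 result 1022.25f. This is the before/after comparison referred to in the commit subject.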