From 9150bff63a690caa743c471943afe509ebed1044 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Wed, 26 May 2021 15:40:53 +0100
Subject: IVGCVSW-4618 'Transition Units Test Suites'

* Used doctest in android-nn-driver unit tests.

Signed-off-by: Sadik Armagan
Change-Id: I9b5d4dfd77d53c7ebee7f8c43628a1d6ff74d1a3
---
 test/Convolution2D.hpp | 74 ++++++++++++++++++++++++--------------------------
 1 file changed, 35 insertions(+), 39 deletions(-)

(limited to 'test/Convolution2D.hpp')

diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index c3f9d48c..540cdd7b 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -7,13 +7,11 @@
 
 #include "DriverTestHelpers.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include
 #include
 
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
-
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
@@ -25,11 +23,11 @@ namespace driverTestHelpers
 #define ARMNN_ANDROID_FP16_TEST(result, fp16Expectation, fp32Expectation, fp16Enabled) \
     if (fp16Enabled) \
     { \
-        BOOST_TEST((result == fp16Expectation || result == fp32Expectation), result << \
+        CHECK_MESSAGE((result == fp16Expectation || result == fp32Expectation), result << \
         " does not match either " << fp16Expectation << "[fp16] or " << fp32Expectation << "[fp32]"); \
     } else \
     { \
-        BOOST_TEST(result == fp32Expectation); \
+        CHECK(result == fp32Expectation); \
     }
 
 void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);
@@ -55,22 +53,22 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
 
     // add operands
     float weightValue[] = {1.f, -1.f, 0.f, 1.f};
-    float biasValue[] = {0.f};
+    float biasValue[] = {0.f};
 
-    AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 3, 1});
-    AddTensorOperand(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
-    AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
-    AddIntOperand(model, (int32_t)paddingScheme); // padding
+    AddInputOperand(model, hidl_vec < uint32_t > {1, 2, 3, 1});
+    AddTensorOperand(model, hidl_vec < uint32_t > {1, 2, 2, 1}, weightValue);
+    AddTensorOperand(model, hidl_vec < uint32_t > {1}, biasValue);
+    AddIntOperand(model, (int32_t) paddingScheme); // padding
     AddIntOperand(model, 2); // stride x
     AddIntOperand(model, 2); // stride y
     AddIntOperand(model, 0); // no activation
-    AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
+    AddOutputOperand(model, hidl_vec < uint32_t > {1, 1, outSize, 1});
 
     // make the convolution operation
     model.operations.resize(1);
     model.operations[0].type = HalOperationType::CONV_2D;
-    model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
-    model.operations[0].outputs = hidl_vec<uint32_t>{7};
+    model.operations[0].inputs = hidl_vec < uint32_t > {0, 1, 2, 3, 4, 5, 6};
+    model.operations[0].outputs = hidl_vec < uint32_t > {7};
 
     // make the prepared model
     SetModelFp16Flag(model, fp16Enabled);
@@ -78,24 +76,24 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
 
     // construct the request
     V1_0::DataLocation inloc = {};
-    inloc.poolIndex = 0;
-    inloc.offset = 0;
-    inloc.length = 6 * sizeof(float);
-    RequestArgument input = {};
-    input.location = inloc;
-    input.dimensions = hidl_vec<uint32_t>{};
+    inloc.poolIndex = 0;
+    inloc.offset = 0;
+    inloc.length = 6 * sizeof(float);
+    RequestArgument input = {};
+    input.location = inloc;
+    input.dimensions = hidl_vec < uint32_t > {};
 
     V1_0::DataLocation outloc = {};
-    outloc.poolIndex = 1;
-    outloc.offset = 0;
-    outloc.length = outSize * sizeof(float);
-    RequestArgument output = {};
-    output.location = outloc;
-    output.dimensions = hidl_vec<uint32_t>{};
+    outloc.poolIndex = 1;
+    outloc.offset = 0;
+    outloc.length = outSize * sizeof(float);
+    RequestArgument output = {};
+    output.location = outloc;
+    output.dimensions = hidl_vec < uint32_t > {};
 
     V1_0::Request request = {};
-    request.inputs = hidl_vec<RequestArgument>{input};
-    request.outputs = hidl_vec<RequestArgument>{output};
+    request.inputs = hidl_vec < RequestArgument > {input};
+    request.outputs = hidl_vec < RequestArgument > {output};
 
     // set the input data (matching source test)
     float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1, -1024.25f};
@@ -114,19 +112,17 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
 
     // check the result
     switch (paddingScheme)
    {
-    case android::nn::kPaddingValid:
-        ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
-        break;
-    case android::nn::kPaddingSame:
-        ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
-        BOOST_TEST(outdata[1] == 0.f);
-        break;
-    default:
-        BOOST_TEST(false);
-        break;
+        case android::nn::kPaddingValid:
+            ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
+            break;
+        case android::nn::kPaddingSame:
+            ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
+            CHECK(outdata[1] == 0.f);
+            break;
+        default:
+            CHECK(false);
+            break;
     }
 }
 
 } // namespace driverTestHelpers
-
-BOOST_AUTO_TEST_SUITE_END()
--
cgit v1.2.1
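
For readers unfamiliar with doctest, the sketch below illustrates the assertion idioms this patch migrates to: a plain BOOST_TEST becomes CHECK, BOOST_TEST with a message becomes CHECK_MESSAGE, and test grouping can optionally use a TEST_SUITE block. It is not part of the commit; the suite name, test name, and values are illustrative only, and it assumes doctest is available on the include path.

// Standalone doctest sketch (illustrative, not from android-nn-driver).
// DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN makes doctest supply main() so this
// example can be compiled and run on its own.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

TEST_SUITE("Convolution2DExampleTests") // optional grouping, roughly what BOOST_AUTO_TEST_SUITE provided
{
    TEST_CASE("CheckMacrosReplaceBoostTest")
    {
        const float expected = 1022.25f;
        const float actual   = 1022.25f;

        // CHECK is the drop-in replacement for a plain BOOST_TEST(condition).
        CHECK(actual == expected);

        // CHECK_MESSAGE attaches extra context to a failure report, mirroring
        // the streamed message used by ARMNN_ANDROID_FP16_TEST in this patch.
        CHECK_MESSAGE(actual == expected,
                      actual << " does not match expected value " << expected);

        // doctest::Approx loosens exact float equality, which is useful when
        // fp16 rounding (as exercised by the fp16Enabled path) is in play.
        CHECK(actual == doctest::Approx(expected).epsilon(0.01));
    }
}

Note that the commit removes BOOST_AUTO_TEST_SUITE / BOOST_AUTO_TEST_SUITE_END without adding a doctest replacement in this header: Convolution2D.hpp only provides PaddingTestImpl and the ARMNN_ANDROID_FP16_TEST helper macro, presumably leaving the actual TEST_CASE definitions to the sources that include it.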