From 77605826a353981d41f0ee346850d411770535f8 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Mon, 3 Sep 2018 11:25:56 +0100
Subject: IVGCVSW-1713 Create a minimum unit test to compare the results
 before and after passing the FP16 flag in the Android-nn-driver

Change-Id: If8d4ca12421c3bee2526eec98f11d393af822373
---
 test/DriverTestHelpers.cpp | 79 +++++++++++-----------------------------------
 1 file changed, 18 insertions(+), 61 deletions(-)

(limited to 'test/DriverTestHelpers.cpp')

diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 11154912..ded24592 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -109,70 +109,32 @@ void AddPoolAndSetData(uint32_t size, Request& request, const float* data)
     memcpy(dst, data, size * sizeof(float));
 }
 
-void AddOperand(neuralnetworks::V1_0::Model& model, const Operand& op)
-{
-    model.operands.resize(model.operands.size() + 1);
-    model.operands[model.operands.size() - 1] = op;
-}
-
-void AddIntOperand(neuralnetworks::V1_0::Model& model, int32_t value)
-{
-    DataLocation location = {};
-    location.offset = model.operandValues.size();
-    location.length = sizeof(int32_t);
-
-    Operand op = {};
-    op.type = OperandType::INT32;
-    op.dimensions = hidl_vec<uint32_t>{};
-    op.lifetime = OperandLifeTime::CONSTANT_COPY;
-    op.location = location;
-
-    model.operandValues.resize(model.operandValues.size() + location.length);
-    *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
-
-    AddOperand(model, op);
-}
-
-void AddInputOperand(neuralnetworks::V1_0::Model& model,
-                     hidl_vec<uint32_t> dimensions,
-                     neuralnetworks::V1_0::OperandType operandType)
-{
-    Operand op = {};
-    op.type = operandType;
-    op.dimensions = dimensions;
-    op.lifetime = OperandLifeTime::MODEL_INPUT;
-
-    AddOperand(model, op);
-
-    model.inputIndexes.resize(model.inputIndexes.size() + 1);
-    model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
-}
-
-void AddOutputOperand(neuralnetworks::V1_0::Model& model,
-                      hidl_vec<uint32_t> dimensions,
-                      neuralnetworks::V1_0::OperandType operandType)
+android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_0::Model& model,
+                                                   armnn_driver::ArmnnDriver& driver,
+                                                   ErrorStatus& prepareStatus,
+                                                   ErrorStatus expectedStatus)
 {
-    Operand op = {};
-    op.type = operandType;
-    op.scale = operandType == neuralnetworks::V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
-    op.dimensions = dimensions;
-    op.lifetime = OperandLifeTime::MODEL_OUTPUT;
-
-    AddOperand(model, op);
+    android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
+    driver.prepareModel(model, cb);
 
-    model.outputIndexes.resize(model.outputIndexes.size() + 1);
-    model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
+    prepareStatus = cb->GetErrorStatus();
+    BOOST_TEST(prepareStatus == expectedStatus);
+    if (expectedStatus == ErrorStatus::NONE)
+    {
+        BOOST_TEST((cb->GetPreparedModel() != nullptr));
+    }
+    return cb->GetPreparedModel();
 }
 
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+
-android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_0::Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_1::Model& model,
                                                    armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus & prepareStatus,
+                                                   ErrorStatus& prepareStatus,
                                                    ErrorStatus expectedStatus)
 {
-    android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
-    driver.prepareModel(model, cb);
+    android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
+    driver.prepareModel_1_1(model, neuralnetworks::V1_1::ExecutionPreference::LOW_POWER, cb);
 
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
@@ -183,12 +145,7 @@ android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_0::M
     return cb->GetPreparedModel();
 }
 
-android::sp<IPreparedModel> PrepareModel(const neuralnetworks::V1_0::Model& model,
-                                         armnn_driver::ArmnnDriver& driver)
-{
-    ErrorStatus prepareStatus = ErrorStatus::NONE;
-    return PrepareModelWithStatus(model, driver, prepareStatus);
-}
+#endif
 
 ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
                     const Request& request,
--
cgit v1.2.1
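
Editorial note: a minimal sketch, not part of the patch, of how the refactored PrepareModelWithStatus helper might be driven from a Boost.Test case once an FP16 flag is involved. The DriverOptions constructor taking an FP16 toggle, the suite/case names, and the using-directives are assumptions; check DriverOptions.hpp and the existing files under test/ for the real API and conventions.

// Hypothetical usage sketch (not from this patch). Assumes DriverOptions can be
// constructed from a compute device plus an "enable FP16" flag; verify the exact
// constructor in DriverOptions.hpp before relying on it.
#include "DriverTestHelpers.hpp"

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(Fp16PrepareSketch)

using namespace android::hardware;
using namespace driverTestHelpers;

BOOST_AUTO_TEST_CASE(PrepareModelWithFp16FlagEnabled)
{
    // Assumed DriverOptions signature: (compute device, fp16Enabled).
    armnn_driver::DriverOptions options(armnn::Compute::CpuRef, true);
    armnn_driver::ArmnnDriver driver(options);

    neuralnetworks::V1_0::Model model = {};
    // ... build a small, well-formed model here with the Add*Operand helpers
    //     declared in DriverTestHelpers.hpp ...

    // PrepareModelWithStatus BOOST_TESTs the returned status against the
    // expected value and hands back the prepared model on success.
    ErrorStatus prepareStatus = ErrorStatus::NONE;
    android::sp<IPreparedModel> preparedModel =
        PrepareModelWithStatus(model, driver, prepareStatus, ErrorStatus::NONE);

    BOOST_TEST((preparedModel != nullptr));
}

BOOST_AUTO_TEST_SUITE_END()

Comparing results before and after the flag, as the commit subject describes, would then amount to preparing the same model with two drivers, one with FP16 enabled and one without, executing both prepared models, and checking that the outputs agree within tolerance.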