author     Matteo Martincigh <matteo.martincigh@arm.com>  2018-10-25 15:39:33 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2018-10-25 16:57:26 +0100
commit     8d50f8f4b848547635a74e6fdec29f632748ebb1 (patch)
tree       50b2c00fa2a9e7a4e1dc24326e39a3338f1ec296
parent     50db26cbd83e7753c1581cc8c29b8e575da66ade (diff)
download   android-nn-driver-8d50f8f4b848547635a74e6fdec29f632748ebb1.tar.gz
IVGCVSW-2051 Added unit tests in the Android NN Driver for the new Mean layer

 * Added unit tests (in a new file test/1.1/Mean.cpp)
 * Refactored ArmnnDriverImpl to remove redundant tags in the log messages
 * Modified AddInputOperand to accept a quantized input tensor

Change-Id: Ie037ce426777daab28b0501124e1cc8686ad6184
-rw-r--r--  ArmnnDriverImpl.cpp           28
-rw-r--r--  test/1.1/Mean.cpp            137
-rw-r--r--  test/Android.mk                1
-rw-r--r--  test/DriverTestHelpers.hpp     1
4 files changed, 151 insertions, 16 deletions
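
Note on the third bullet of the commit message: the DriverTestHelpers.hpp hunk at the bottom of this diff gives quantized input operands a non-zero scale, so tests can build models whose inputs are TENSOR_QUANT8_ASYMM. Below is a minimal standalone sketch of that scale selection; the locally defined OperandType enum is a stand-in for the NN HAL type and is an assumption for illustration, not code from this commit.

#include <cassert>

// Stand-in for the NN HAL operand type enum; values are illustrative only.
enum class OperandType { TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

// Quantized 8-bit operands need a non-zero scale; 1/255 maps the full uint8
// range onto [0, 1]. Non-quantized operands keep a scale of 0, as before.
float InputOperandScale(OperandType type)
{
    return type == OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
}

int main()
{
    assert(InputOperandScale(OperandType::TENSOR_QUANT8_ASYMM) == 1.f / 255.f);
    assert(InputOperandScale(OperandType::TENSOR_FLOAT32) == 0.f);
    return 0;
}
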
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 267574c1..a3c2e10f 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -120,14 +120,12 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
if (!runtime)
{
- return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
- "ArmnnDriverImpl::prepareModel: Device unavailable", cb);
+ return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
}
if (!android::nn::validateModel(model))
{
- return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
- "ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
+ return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
}
// Deliberately ignore any unsupported operations requested by the options -
@@ -140,8 +138,7 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
if (modelConverter.GetConversionResult() != ConversionResult::Success)
{
- FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
- "ArmnnDriverImpl::prepareModel: ModelToINetworkConverter failed", cb);
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
return ErrorStatus::NONE;
}
@@ -162,7 +159,7 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
catch (armnn::Exception &e)
{
stringstream message;
- message << "ArmnnDriverImpl::prepareModel: armnn::Exception (" << e.what() << ") caught from optimize.";
+ message << "armnn::Exception (" << e.what() << ") caught from optimize.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -171,12 +168,12 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
if (!optNet)
{
stringstream message;
- message << "ArmnnDriverImpl::prepareModel: Invalid optimized network";
- for (const string& msg : errMessages) {
+ message << "Invalid optimized network";
+ for (const string& msg : errMessages)
+ {
message << "\n" << msg;
}
- FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
- message.str(), cb);
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -190,14 +187,13 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
{
if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
{
- return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
- "ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
+ return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
}
}
catch (armnn::Exception& e)
{
stringstream message;
- message << "ArmnnDriverImpl::prepareModel: armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
+ message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -225,7 +221,7 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
catch (const armnn::Exception& error)
{
ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
- options.GetClTunedParametersFile().c_str(), error.what());
+ options.GetClTunedParametersFile().c_str(), error.what());
}
}
@@ -252,4 +248,4 @@ template class ArmnnDriverImpl<hal_1_0::HalPolicy>;
template class ArmnnDriverImpl<hal_1_1::HalPolicy>;
#endif
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
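
The hunk above drops the repeated "ArmnnDriverImpl::prepareModel:" prefix from the messages passed to FailPrepareModel, per the second bullet of the commit message. The sketch below is a hypothetical illustration of the pattern this relies on, not the driver's actual FailPrepareModel (whose body is outside this diff): the helper owns the common context once, so call sites only pass the specific reason.

#include <iostream>
#include <string>

enum class ErrorStatus { NONE, DEVICE_UNAVAILABLE, INVALID_ARGUMENT, GENERAL_FAILURE };

// Hypothetical failure helper: it logs the shared context itself, so the
// call sites shown in the hunk above need not repeat it in every message.
ErrorStatus FailPrepareModel(ErrorStatus status, const std::string& reason)
{
    std::cerr << "ArmnnDriverImpl::prepareModel: " << reason << std::endl;
    return status;
}

int main()
{
    FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable");
    return 0;
}
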
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
new file mode 100644
index 00000000..4ebb8cfa
--- /dev/null
+++ b/test/1.1/Mean.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../DriverTestHelpers.hpp"
+#include "../TestTensor.hpp"
+
+#include <boost/array.hpp>
+#include <boost/test/data/test_case.hpp>
+
+BOOST_AUTO_TEST_SUITE(MeanTests)
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+namespace
+{
+
+static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
+
+void MeanTestImpl(const TestTensor& input,
+ const hidl_vec<uint32_t>& axisDimensions,
+ const int32_t* axisValues,
+ int32_t keepDims,
+ const TestTensor& expectedOutput,
+ bool fp16Enabled,
+ armnn::Compute computeDevice)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice, fp16Enabled));
+
+ V1_1::Model model = {};
+ AddInputOperand (model, input.GetDimensions());
+ AddTensorOperand(model, axisDimensions, const_cast<int32_t*>(axisValues), OperandType::TENSOR_INT32);
+ AddIntOperand (model, keepDims);
+ AddOutputOperand(model, expectedOutput.GetDimensions());
+
+ model.operations.resize(1);
+ model.operations[0].type = V1_1::OperationType::MEAN;
+ model.operations[0].inputs = hidl_vec<uint32_t>{ 0, 1, 2 };
+ model.operations[0].outputs = hidl_vec<uint32_t>{ 3 };
+ model.relaxComputationFloat32toFloat16 = fp16Enabled;
+
+ android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+
+ // The request's memory pools will follow the same order as the inputs
+ DataLocation inLoc = {};
+ inLoc.poolIndex = 0;
+ inLoc.offset = 0;
+ inLoc.length = input.GetNumElements() * sizeof(float);
+ RequestArgument inArg = {};
+ inArg.location = inLoc;
+ inArg.dimensions = input.GetDimensions();
+
+ // An additional memory pool is needed for the output
+ DataLocation outLoc = {};
+ outLoc.poolIndex = 1;
+ outLoc.offset = 0;
+ outLoc.length = expectedOutput.GetNumElements() * sizeof(float);
+ RequestArgument outArg = {};
+ outArg.location = outLoc;
+ outArg.dimensions = expectedOutput.GetDimensions();
+
+ // Make the request based on the arguments
+ Request request = {};
+ request.inputs = hidl_vec<RequestArgument>{ inArg };
+ request.outputs = hidl_vec<RequestArgument>{ outArg };
+
+ // Set the input data
+ AddPoolAndSetData(input.GetNumElements(), request, input.GetData());
+
+ // Add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(expectedOutput.GetNumElements(), request);
+ const float* outputData = static_cast<const float*>(static_cast<void*>(outMemory->getPointer()));
+
+ ErrorStatus execStatus = Execute(preparedModel, request);
+ BOOST_TEST(execStatus == ErrorStatus::NONE);
+
+ const float* expectedOutputData = expectedOutput.GetData();
+ for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
+ {
+ BOOST_TEST(outputData[i] == expectedOutputData[i]);
+ }
+}
+
+} // anonymous namespace
+
+BOOST_DATA_TEST_CASE(MeanNoKeepDimsTest, COMPUTE_DEVICES)
+{
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 }, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, sample);
+}
+
+BOOST_DATA_TEST_CASE(MeanKeepDimsTest, COMPUTE_DEVICES)
+{
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, sample);
+}
+
+BOOST_DATA_TEST_CASE(MeanFp16NoKeepDimsTest, COMPUTE_DEVICES)
+{
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 }, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, sample);
+}
+
+BOOST_DATA_TEST_CASE(MeanFp16KeepDimsTest, COMPUTE_DEVICES)
+{
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, sample);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
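
For reference, the expected values in the NoKeepDims cases above follow from reducing the 4x3x2 input holding 1..24 over axes 0 and 1: the twelve elements at last-axis position 0 average to 12 and the twelve at position 1 average to 13 (the KeepDims cases average 1, 2, 3 to get 2 in the same way). A small standalone check of that arithmetic, written in plain C++ and independent of the driver test framework:

#include <cassert>
#include <vector>

int main()
{
    // Input tensor of shape { 4, 3, 2 } holding 1..24 in row-major order,
    // matching MeanNoKeepDimsTest above.
    std::vector<float> input(24);
    for (int i = 0; i < 24; ++i) { input[i] = static_cast<float>(i + 1); }

    // Reducing over axes 0 and 1 leaves only the last axis of size 2: each
    // output element is the mean of the 12 values sharing its position in
    // the last dimension.
    float sums[2] = { 0.f, 0.f };
    for (int i = 0; i < 24; ++i) { sums[i % 2] += input[i]; }

    assert(sums[0] / 12.f == 12.f); // matches expectedOutput { 12.0f, ... }
    assert(sums[1] / 12.f == 13.f); // matches expectedOutput { ..., 13.0f }
    return 0;
}
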
diff --git a/test/Android.mk b/test/Android.mk
index 3a58618b..da3ac706 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -121,6 +121,7 @@ LOCAL_CFLAGS := \
LOCAL_SRC_FILES := \
1.0/Convolution2D.cpp \
1.1/Convolution2D.cpp \
+ 1.1/Mean.cpp \
1.1/Transpose.cpp \
Tests.cpp \
UtilsTests.cpp \
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index cce220e5..370936fe 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -146,6 +146,7 @@ void AddInputOperand(HalModel& model,
{
Operand op = {};
op.type = operandType;
+ op.scale = operandType == OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
op.dimensions = dimensions;
op.lifetime = OperandLifeTime::MODEL_INPUT;