aboutsummaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorsurmeh01 <surabhi.mehta@arm.com>2018-05-17 14:11:25 +0100
committertelsoa01 <telmo.soares@arm.com>2018-05-23 16:23:49 +0100
commit49b9e100bfbb3b8da01472a0ff48b2bd92944e01 (patch)
tree1a998fa12f665ff0a15b299d8bae5590e0aed884 /test
parent28adb40e1bb1d3f3a06a7f333f7f2a4f42d3ed4b (diff)
downloadandroid-nn-driver-49b9e100bfbb3b8da01472a0ff48b2bd92944e01.tar.gz
Release 18.05
Diffstat (limited to 'test')
-rw-r--r--test/Android.mk15
-rw-r--r--test/Concurrent.cpp109
-rw-r--r--test/Convolution2D.cpp110
-rw-r--r--test/DriverTestHelpers.cpp218
-rw-r--r--test/DriverTestHelpers.hpp135
-rw-r--r--test/FullyConnected.cpp254
-rw-r--r--test/GenericLayerTests.cpp196
-rw-r--r--test/Merger.cpp408
-rw-r--r--test/SystemProperties.cpp57
-rw-r--r--test/TestTensor.cpp32
-rw-r--r--test/TestTensor.hpp32
-rw-r--r--test/Tests.cpp933
-rw-r--r--test/UtilsTests.cpp6
13 files changed, 1569 insertions, 936 deletions
diff --git a/test/Android.mk b/test/Android.mk
index 95de4617..d74afecc 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -28,7 +28,15 @@ LOCAL_CFLAGS := \
LOCAL_SRC_FILES := \
Tests.cpp \
- UtilsTests.cpp
+ UtilsTests.cpp \
+ Concurrent.cpp \
+ Convolution2D.cpp \
+ FullyConnected.cpp \
+ GenericLayerTests.cpp \
+ DriverTestHelpers.cpp \
+ SystemProperties.cpp \
+ Merger.cpp \
+ TestTensor.cpp
LOCAL_STATIC_LIBRARIES := \
libarmnn-driver \
@@ -45,9 +53,8 @@ LOCAL_SHARED_LIBRARIES := \
libhidlbase \
libhidltransport \
libhidlmemory \
- libtextclassifier \
- libtextclassifier_hash \
liblog \
+ libtextclassifier_hash \
libutils \
android.hardware.neuralnetworks@1.0 \
android.hidl.allocator@1.0 \
@@ -63,6 +70,8 @@ LOCAL_ARM_MODE := arm
# Mark source files as dependent on Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+LOCAL_PROPRIETARY_MODULE := true
+
include $(BUILD_EXECUTABLE)
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
new file mode 100644
index 00000000..16734dc3
--- /dev/null
+++ b/test/Concurrent.cpp
@@ -0,0 +1,109 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+BOOST_AUTO_TEST_SUITE(ConcurrentDriverTests)
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using namespace android::nn;
+using namespace driverTestHelpers;
+
+// Add our own test for concurrent execution
+// The main point of this test is to check that multiple requests can be
+// executed without waiting for the callback from previous execution.
+// The operations performed are not significant.
+BOOST_AUTO_TEST_CASE(ConcurrentExecute)
+{
+ ALOGI("ConcurrentExecute: entry");
+
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+ Model model = {};
+
+ // add operands
+ int32_t actValue = 0;
+ float weightValue[] = {2, 4, 1};
+ float biasValue[] = {4};
+
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand(model, actValue);
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
+
+ // make the fully connected operation
+ model.operations.resize(1);
+ model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
+ model.operations[0].outputs = hidl_vec<uint32_t>{4};
+
+ // make the prepared models
+ const size_t maxRequests = 5;
+ android::sp<IPreparedModel> preparedModels[maxRequests];
+ for (size_t i = 0; i < maxRequests; ++i)
+ {
+ preparedModels[i] = PrepareModel(model, *driver);
+ }
+
+ // construct the request data
+ DataLocation inloc = {};
+ inloc.poolIndex = 0;
+ inloc.offset = 0;
+ inloc.length = 3 * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = hidl_vec<uint32_t>{};
+
+ DataLocation outloc = {};
+ outloc.poolIndex = 1;
+ outloc.offset = 0;
+ outloc.length = 1 * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = hidl_vec<uint32_t>{};
+
+ // build the requests
+ Request requests[maxRequests];
+ android::sp<IMemory> outMemory[maxRequests];
+ float* outdata[maxRequests];
+ for (size_t i = 0; i < maxRequests; ++i)
+ {
+ requests[i].inputs = hidl_vec<RequestArgument>{input};
+ requests[i].outputs = hidl_vec<RequestArgument>{output};
+ // set the input data (matching source test)
+ float indata[] = {2, 32, 16};
+ AddPoolAndSetData(3, requests[i], indata);
+ // add memory for the output
+ outMemory[i] = AddPoolAndGetData(1, requests[i]);
+ outdata[i] = static_cast<float*>(static_cast<void*>(outMemory[i]->getPointer()));
+ }
+
+ // invoke the execution of the requests
+ ALOGI("ConcurrentExecute: executing requests");
+ android::sp<ExecutionCallback> cb[maxRequests];
+ for (size_t i = 0; i < maxRequests; ++i)
+ {
+ cb[i] = ExecuteNoWait(preparedModels[i], requests[i]);
+ }
+
+ // wait for the requests to complete
+ ALOGI("ConcurrentExecute: waiting for callbacks");
+ for (size_t i = 0; i < maxRequests; ++i)
+ {
+ cb[i]->wait();
+ }
+
+ // check the results
+ ALOGI("ConcurrentExecute: validating results");
+ for (size_t i = 0; i < maxRequests; ++i)
+ {
+ BOOST_TEST(outdata[i][0] == 152);
+ }
+ ALOGI("ConcurrentExecute: exit");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/Convolution2D.cpp b/test/Convolution2D.cpp
new file mode 100644
index 00000000..90edb415
--- /dev/null
+++ b/test/Convolution2D.cpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+#include "OperationsUtils.h"
+
+BOOST_AUTO_TEST_SUITE(Convolution2DTests)
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using namespace driverTestHelpers;
+
+namespace
+{
+
+void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+ Model model = {};
+
+ uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1;
+
+ // add operands
+ float weightValue[] = {1, -1, 0, 1};
+ float biasValue[] = {0};
+
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 3, 1});
+ AddTensorOperand(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
+ AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand(model, (int32_t)paddingScheme); // padding
+ AddIntOperand(model, 2); // stride x
+ AddIntOperand(model, 2); // stride y
+ AddIntOperand(model, 0); // no activation
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
+
+ // make the convolution operation
+ model.operations.resize(1);
+ model.operations[0].type = OperationType::CONV_2D;
+ model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
+ model.operations[0].outputs = hidl_vec<uint32_t>{7};
+
+ // make the prepared model
+ android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+
+ // construct the request
+ DataLocation inloc = {};
+ inloc.poolIndex = 0;
+ inloc.offset = 0;
+ inloc.length = 6 * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = hidl_vec<uint32_t>{};
+
+ DataLocation outloc = {};
+ outloc.poolIndex = 1;
+ outloc.offset = 0;
+ outloc.length = outSize * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = hidl_vec<uint32_t>{};
+
+ Request request = {};
+ request.inputs = hidl_vec<RequestArgument>{input};
+ request.outputs = hidl_vec<RequestArgument>{output};
+
+
+ // set the input data (matching source test)
+ float indata[] = {4, 1, 0, 3, -1, 2};
+ AddPoolAndSetData(6, request, indata);
+
+ // add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(outSize, request);
+ float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+ // run the execution
+ Execute(preparedModel, request);
+
+ // check the result
+ if (paddingScheme == android::nn::kPaddingValid)
+ {
+ BOOST_TEST(outdata[0] == 2);
+ }
+ else if (paddingScheme == android::nn::kPaddingSame)
+ {
+ BOOST_TEST(outdata[0] == 2);
+ BOOST_TEST(outdata[1] == 0);
+ }
+ else
+ {
+ BOOST_TEST(false);
+ }
+}
+
+} // namespace <anonymous>
+
+BOOST_AUTO_TEST_CASE(ConvValidPadding)
+{
+ PaddingTestImpl(android::nn::kPaddingValid);
+}
+
+BOOST_AUTO_TEST_CASE(ConvSamePadding)
+{
+ PaddingTestImpl(android::nn::kPaddingSame);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
new file mode 100644
index 00000000..5b371921
--- /dev/null
+++ b/test/DriverTestHelpers.cpp
@@ -0,0 +1,218 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include <log/log.h>
+#include <boost/test/unit_test.hpp>
+
+namespace android
+{
+namespace hardware
+{
+namespace neuralnetworks
+{
+namespace V1_0
+{
+
+std::ostream& operator<<(std::ostream& os, ErrorStatus stat)
+{
+ return os << static_cast<int>(stat);
+}
+
+} // namespace android::hardware::neuralnetworks::V1_0
+} // namespace android::hardware::neuralnetworks
+} // namespace android::hardware
+} // namespace android
+
+
+namespace driverTestHelpers
+{
+
+Return<void> ExecutionCallback::notify(ErrorStatus status)
+{
+ (void)status;
+ ALOGI("ExecutionCallback::notify invoked");
+ std::lock_guard<std::mutex> executionLock(mMutex);
+ mNotified = true;
+ mCondition.notify_one();
+ return Void();
+}
+
+Return<void> ExecutionCallback::wait()
+{
+ ALOGI("ExecutionCallback::wait invoked");
+ std::unique_lock<std::mutex> executionLock(mMutex);
+ while (!mNotified)
+ {
+ mCondition.wait(executionLock);
+ }
+ mNotified = false;
+ return Void();
+}
+
+Return<void> PreparedModelCallback::notify(ErrorStatus status,
+ const android::sp<IPreparedModel>& preparedModel)
+{
+ m_ErrorStatus = status;
+ m_PreparedModel = preparedModel;
+ return Void();
+}
+
+// lifted from common/Utils.cpp
+hidl_memory allocateSharedMemory(int64_t size)
+{
+ hidl_memory memory;
+
+ const std::string& type = "ashmem";
+ android::sp<IAllocator> allocator = IAllocator::getService(type);
+ allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
+ if (!success)
+ {
+ ALOGE("unable to allocate %li bytes of %s", size, type.c_str());
+ }
+ else
+ {
+ memory = mem;
+ }
+ });
+
+ return memory;
+}
+
+android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request)
+{
+ hidl_memory pool;
+
+ android::sp<IAllocator> allocator = IAllocator::getService("ashmem");
+ allocator->allocate(sizeof(float) * size, [&](bool success, const hidl_memory& mem) {
+ BOOST_TEST(success);
+ pool = mem;
+ });
+
+ request.pools.resize(request.pools.size() + 1);
+ request.pools[request.pools.size() - 1] = pool;
+
+ android::sp<IMemory> mapped = mapMemory(pool);
+ mapped->update();
+ return mapped;
+}
+
+void AddPoolAndSetData(uint32_t size, Request& request, const float* data)
+{
+ android::sp<IMemory> memory = AddPoolAndGetData(size, request);
+
+ float* dst = static_cast<float*>(static_cast<void*>(memory->getPointer()));
+
+ memcpy(dst, data, size * sizeof(float));
+}
+
+void AddOperand(Model& model, const Operand& op)
+{
+ model.operands.resize(model.operands.size() + 1);
+ model.operands[model.operands.size() - 1] = op;
+}
+
+void AddIntOperand(Model& model, int32_t value)
+{
+ DataLocation location = {};
+ location.offset = model.operandValues.size();
+ location.length = sizeof(int32_t);
+
+ Operand op = {};
+ op.type = OperandType::INT32;
+ op.dimensions = hidl_vec<uint32_t>{};
+ op.lifetime = OperandLifeTime::CONSTANT_COPY;
+ op.location = location;
+
+ model.operandValues.resize(model.operandValues.size() + location.length);
+ *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
+
+ AddOperand(model, op);
+}
+
+void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions)
+{
+ Operand op = {};
+ op.type = OperandType::TENSOR_FLOAT32;
+ op.dimensions = dimensions;
+ op.lifetime = OperandLifeTime::MODEL_INPUT;
+
+ AddOperand(model, op);
+
+ model.inputIndexes.resize(model.inputIndexes.size() + 1);
+ model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
+}
+
+void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions)
+{
+ Operand op = {};
+ op.type = OperandType::TENSOR_FLOAT32;
+ op.dimensions = dimensions;
+ op.lifetime = OperandLifeTime::MODEL_OUTPUT;
+
+ AddOperand(model, op);
+
+ model.outputIndexes.resize(model.outputIndexes.size() + 1);
+ model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
+}
+
+
+android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
+ armnn_driver::ArmnnDriver& driver,
+ ErrorStatus & prepareStatus,
+ ErrorStatus expectedStatus)
+{
+
+ android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
+ driver.prepareModel(model, cb);
+
+ prepareStatus = cb->GetErrorStatus();
+ BOOST_TEST(prepareStatus == expectedStatus);
+ if (expectedStatus == ErrorStatus::NONE)
+ {
+ BOOST_TEST((cb->GetPreparedModel() != nullptr));
+ }
+ return cb->GetPreparedModel();
+}
+
+android::sp<IPreparedModel> PrepareModel(const Model& model,
+ armnn_driver::ArmnnDriver& driver)
+{
+ ErrorStatus prepareStatus = ErrorStatus::NONE;
+ return PrepareModelWithStatus(model, driver, prepareStatus);
+}
+
+ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
+ const Request& request,
+ ErrorStatus expectedStatus)
+{
+ android::sp<ExecutionCallback> cb(new ExecutionCallback());
+ ErrorStatus execStatus = preparedModel->execute(request, cb);
+ BOOST_TEST(execStatus == expectedStatus);
+ ALOGI("Execute: waiting for callback to be invoked");
+ cb->wait();
+ return execStatus;
+}
+
+android::sp<ExecutionCallback> ExecuteNoWait(android::sp<IPreparedModel> preparedModel, const Request& request)
+{
+ android::sp<ExecutionCallback> cb(new ExecutionCallback());
+ BOOST_TEST(preparedModel->execute(request, cb) == ErrorStatus::NONE);
+ ALOGI("ExecuteNoWait: returning callback object");
+ return cb;
+}
+
+template<>
+OperandType TypeToOperandType<float>()
+{
+ return OperandType::TENSOR_FLOAT32;
+};
+
+template<>
+OperandType TypeToOperandType<int32_t>()
+{
+ return OperandType::TENSOR_INT32;
+};
+
+} // namespace driverTestHelpers
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
new file mode 100644
index 00000000..e90f7ecf
--- /dev/null
+++ b/test/DriverTestHelpers.hpp
@@ -0,0 +1,135 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#ifndef LOG_TAG
+#define LOG_TAG "ArmnnDriverTests"
+#endif // LOG_TAG
+
+#include "../ArmnnDriver.hpp"
+#include <iosfwd>
+
+namespace android
+{
+namespace hardware
+{
+namespace neuralnetworks
+{
+namespace V1_0
+{
+
+std::ostream& operator<<(std::ostream& os, ErrorStatus stat);
+
+} // namespace android::hardware::neuralnetworks::V1_0
+} // namespace android::hardware::neuralnetworks
+} // namespace android::hardware
+} // namespace android
+
+namespace driverTestHelpers
+{
+
+std::ostream& operator<<(std::ostream& os, android::hardware::neuralnetworks::V1_0::ErrorStatus stat);
+
+struct ExecutionCallback : public IExecutionCallback
+{
+ ExecutionCallback() : mNotified(false) {}
+ Return<void> notify(ErrorStatus status) override;
+ /// wait until the callback has notified us that it is done
+ Return<void> wait();
+
+private:
+ // use a mutex and a condition variable to wait for asynchronous callbacks
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ // and a flag, in case we are notified before the wait call
+ bool mNotified;
+};
+
+class PreparedModelCallback : public IPreparedModelCallback
+{
+public:
+ PreparedModelCallback()
+ : m_ErrorStatus(ErrorStatus::NONE)
+ , m_PreparedModel()
+ { }
+ ~PreparedModelCallback() override { }
+
+ Return<void> notify(ErrorStatus status,
+ const android::sp<IPreparedModel>& preparedModel) override;
+ ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
+ android::sp<IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
+
+private:
+ ErrorStatus m_ErrorStatus;
+ android::sp<IPreparedModel> m_PreparedModel;
+};
+
+hidl_memory allocateSharedMemory(int64_t size);
+
+android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request);
+
+void AddPoolAndSetData(uint32_t size, Request& request, const float* data);
+
+void AddOperand(Model& model, const Operand& op);
+
+void AddIntOperand(Model& model, int32_t value);
+
+template<typename T>
+OperandType TypeToOperandType();
+
+template<>
+OperandType TypeToOperandType<float>();
+
+template<>
+OperandType TypeToOperandType<int32_t>();
+
+template<typename T>
+void AddTensorOperand(Model& model, hidl_vec<uint32_t> dimensions, T* values)
+{
+ uint32_t totalElements = 1;
+ for (uint32_t dim : dimensions)
+ {
+ totalElements *= dim;
+ }
+
+ DataLocation location = {};
+ location.offset = model.operandValues.size();
+ location.length = totalElements * sizeof(T);
+
+ Operand op = {};
+ op.type = TypeToOperandType<T>();
+ op.dimensions = dimensions;
+ op.lifetime = OperandLifeTime::CONSTANT_COPY;
+ op.location = location;
+
+ model.operandValues.resize(model.operandValues.size() + location.length);
+ for (uint32_t i = 0; i < totalElements; i++)
+ {
+ *(reinterpret_cast<T*>(&model.operandValues[location.offset]) + i) = values[i];
+ }
+
+ AddOperand(model, op);
+}
+
+void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions);
+
+void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions);
+
+android::sp<IPreparedModel> PrepareModel(const Model& model,
+ armnn_driver::ArmnnDriver& driver);
+
+android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
+ armnn_driver::ArmnnDriver& driver,
+ ErrorStatus & prepareStatus,
+ ErrorStatus expectedStatus=ErrorStatus::NONE);
+
+ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
+ const Request& request,
+ ErrorStatus expectedStatus=ErrorStatus::NONE);
+
+android::sp<ExecutionCallback> ExecuteNoWait(android::sp<IPreparedModel> preparedModel,
+ const Request& request);
+
+} // namespace driverTestHelpers
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
new file mode 100644
index 00000000..ea6c8715
--- /dev/null
+++ b/test/FullyConnected.cpp
@@ -0,0 +1,254 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+BOOST_AUTO_TEST_SUITE(FullyConnectedTests)
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using namespace driverTestHelpers;
+
+// Add our own test here since we fail the fc tests which Google supplies (because of non-const weights)
+BOOST_AUTO_TEST_CASE(FullyConnected)
+{
+ // this should ideally replicate fully_connected_float.model.cpp
+ // but that uses slightly weird dimensions which I don't think we need to support for now
+
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+ Model model = {};
+
+ // add operands
+ int32_t actValue = 0;
+ float weightValue[] = {2, 4, 1};
+ float biasValue[] = {4};
+
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand(model, actValue);
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
+
+ // make the fully connected operation
+ model.operations.resize(1);
+ model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
+ model.operations[0].outputs = hidl_vec<uint32_t>{4};
+
+ // make the prepared model
+ android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+
+ // construct the request
+ DataLocation inloc = {};
+ inloc.poolIndex = 0;
+ inloc.offset = 0;
+ inloc.length = 3 * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = hidl_vec<uint32_t>{};
+
+ DataLocation outloc = {};
+ outloc.poolIndex = 1;
+ outloc.offset = 0;
+ outloc.length = 1 * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = hidl_vec<uint32_t>{};
+
+ Request request = {};
+ request.inputs = hidl_vec<RequestArgument>{input};
+ request.outputs = hidl_vec<RequestArgument>{output};
+
+ // set the input data (matching source test)
+ float indata[] = {2, 32, 16};
+ AddPoolAndSetData(3, request, indata);
+
+ // add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(1, request);
+ float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+ // run the execution
+ Execute(preparedModel, request);
+
+ // check the result
+ BOOST_TEST(outdata[0] == 152);
+}
+
+BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+
+ ErrorStatus error;
+ std::vector<bool> sup;
+
+ ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+ {
+ error = status;
+ sup = supported;
+ };
+
+ Model model = {};
+
+ // operands
+ int32_t actValue = 0;
+ float weightValue[] = {1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1}; //identity
+ float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ // fully connected operation
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 1, 1, 8});
+ AddTensorOperand(model, hidl_vec<uint32_t>{8, 8}, weightValue);
+ AddTensorOperand(model, hidl_vec<uint32_t>{8}, biasValue);
+ AddIntOperand(model, actValue);
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 8});
+
+ model.operations.resize(1);
+
+ model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
+ model.operations[0].outputs = hidl_vec<uint32_t>{4};
+
+ // make the prepared model
+ android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+
+
+ // construct the request
+ DataLocation inloc = {};
+ inloc.poolIndex = 0;
+ inloc.offset = 0;
+ inloc.length = 8 * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = hidl_vec<uint32_t>{};
+
+ DataLocation outloc = {};
+ outloc.poolIndex = 1;
+ outloc.offset = 0;
+ outloc.length = 8 * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = hidl_vec<uint32_t>{};
+
+ Request request = {};
+ request.inputs = hidl_vec<RequestArgument>{input};
+ request.outputs = hidl_vec<RequestArgument>{output};
+
+ // set the input data
+ float indata[] = {1,2,3,4,5,6,7,8};
+ AddPoolAndSetData(8, request, indata);
+
+ // add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(8, request);
+ float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+ // run the execution
+ Execute(preparedModel, request);
+
+ // check the result
+ BOOST_TEST(outdata[0] == 1);
+ BOOST_TEST(outdata[1] == 2);
+ BOOST_TEST(outdata[2] == 3);
+ BOOST_TEST(outdata[3] == 4);
+ BOOST_TEST(outdata[4] == 5);
+ BOOST_TEST(outdata[5] == 6);
+ BOOST_TEST(outdata[6] == 7);
+ BOOST_TEST(outdata[7] == 8);
+}
+
+BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+
+ ErrorStatus error;
+ std::vector<bool> sup;
+
+ ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+ {
+ error = status;
+ sup = supported;
+ };
+
+ Model model = {};
+
+ // operands
+ int32_t actValue = 0;
+ float weightValue[] = {1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1}; //identity
+ float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ // fully connected operation
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 2, 2});
+ AddTensorOperand(model, hidl_vec<uint32_t>{8, 8}, weightValue);
+ AddTensorOperand(model, hidl_vec<uint32_t>{8}, biasValue);
+ AddIntOperand(model, actValue);
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 8});
+
+ model.operations.resize(1);
+
+ model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
+ model.operations[0].outputs = hidl_vec<uint32_t>{4};
+
+ // make the prepared model
+ android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+
+
+ // construct the request
+ DataLocation inloc = {};
+ inloc.poolIndex = 0;
+ inloc.offset = 0;
+ inloc.length = 8 * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = hidl_vec<uint32_t>{};
+
+ DataLocation outloc = {};
+ outloc.poolIndex = 1;
+ outloc.offset = 0;
+ outloc.length = 8 * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = hidl_vec<uint32_t>{};
+
+ Request request = {};
+ request.inputs = hidl_vec<RequestArgument>{input};
+ request.outputs = hidl_vec<RequestArgument>{output};
+
+ // set the input data
+ float indata[] = {1,2,3,4,5,6,7,8};
+ AddPoolAndSetData(8, request, indata);
+
+ // add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(8, request);
+ float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+ // run the execution
+ Execute(preparedModel, request);
+
+ // check the result
+ BOOST_TEST(outdata[0] == 1);
+ BOOST_TEST(outdata[1] == 2);
+ BOOST_TEST(outdata[2] == 3);
+ BOOST_TEST(outdata[3] == 4);
+ BOOST_TEST(outdata[4] == 5);
+ BOOST_TEST(outdata[5] == 6);
+ BOOST_TEST(outdata[6] == 7);
+ BOOST_TEST(outdata[7] == 8);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
new file mode 100644
index 00000000..5c6c041d
--- /dev/null
+++ b/test/GenericLayerTests.cpp
@@ -0,0 +1,196 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+BOOST_AUTO_TEST_SUITE(GenericLayerTests)
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using namespace driverTestHelpers;
+
+BOOST_AUTO_TEST_CASE(GetSupportedOperations)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+
+ ErrorStatus error;
+ std::vector<bool> sup;
+
+ ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+ {
+ error = status;
+ sup = supported;
+ };
+
+ Model model1 = {};
+
+ // add operands
+ int32_t actValue = 0;
+ float weightValue[] = {2, 4, 1};
+ float biasValue[] = {4};
+
+ AddInputOperand(model1, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand(model1, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand(model1, actValue);
+ AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
+
+ // make a correct fully connected operation
+ model1.operations.resize(2);
+ model1.operations[0].type = OperationType::FULLY_CONNECTED;
+ model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
+ model1.operations[0].outputs = hidl_vec<uint32_t>{4};
+
+ // make an incorrect fully connected operation
+ AddIntOperand(model1, actValue);
+ AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
+ model1.operations[1].type = OperationType::FULLY_CONNECTED;
+ model1.operations[1].inputs = hidl_vec<uint32_t>{4};
+ model1.operations[1].outputs = hidl_vec<uint32_t>{5};
+
+ driver->getSupportedOperations(model1, cb);
+ BOOST_TEST((int)error == (int)ErrorStatus::NONE);
+ BOOST_TEST(sup[0] == true);
+ BOOST_TEST(sup[1] == false);
+
+ // Broadcast add/mul are not supported
+ Model model2 = {};
+
+ AddInputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddInputOperand(model2, hidl_vec<uint32_t>{4});
+ AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+
+ model2.operations.resize(2);
+
+ model2.operations[0].type = OperationType::ADD;
+ model2.operations[0].inputs = hidl_vec<uint32_t>{0,1};
+ model2.operations[0].outputs = hidl_vec<uint32_t>{2};
+
+ model2.operations[1].type = OperationType::MUL;
+ model2.operations[1].inputs = hidl_vec<uint32_t>{0,1};
+ model2.operations[1].outputs = hidl_vec<uint32_t>{3};
+
+ driver->getSupportedOperations(model2, cb);
+ BOOST_TEST((int)error == (int)ErrorStatus::NONE);
+ BOOST_TEST(sup[0] == false);
+ BOOST_TEST(sup[1] == false);
+
+ Model model3 = {};
+
+ // Add unsupported operation, should return no error but we don't support it
+ AddInputOperand(model3, hidl_vec<uint32_t>{1, 1, 1, 8});
+ AddIntOperand(model3, 2);
+ AddOutputOperand(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
+ model3.operations.resize(1);
+ model3.operations[0].type = OperationType::DEPTH_TO_SPACE;
+ model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
+ model3.operations[0].outputs = hidl_vec<uint32_t>{2};
+
+ driver->getSupportedOperations(model3, cb);
+ BOOST_TEST((int)error == (int)ErrorStatus::NONE);
+ BOOST_TEST(sup[0] == false);
+
+ // Add invalid operation
+ Model model4 = {};
+ AddIntOperand(model4, 0);
+ model4.operations.resize(1);
+ model4.operations[0].type = static_cast<OperationType>(100);
+ model4.operations[0].outputs = hidl_vec<uint32_t>{0};
+
+ driver->getSupportedOperations(model4, cb);
+ BOOST_TEST((int)error == (int)ErrorStatus::INVALID_ARGUMENT);
+}
+
+// The purpose of this test is to ensure that when encountering an unsupported operation
+// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
+// As per IVGCVSW-710.
+BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+
+ ErrorStatus error;
+ std::vector<bool> sup;
+
+ ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+ {
+ error = status;
+ sup = supported;
+ };
+
+ Model model = {};
+
+ // operands
+ int32_t actValue = 0;
+ float weightValue[] = {2, 4, 1};
+ float biasValue[] = {4};
+
+ // broadcast add is unsupported at the time of writing this test, but any unsupported layer will do
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddInputOperand(model, hidl_vec<uint32_t>{4});
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+
+ // fully connected
+ AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand(model, actValue);
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
+
+ // broadcast mul is unsupported
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+
+ model.operations.resize(3);
+
+ // unsupported
+ model.operations[0].type = OperationType::ADD;
+ model.operations[0].inputs = hidl_vec<uint32_t>{0,1};
+ model.operations[0].outputs = hidl_vec<uint32_t>{2};
+
+ // supported
+ model.operations[1].type = OperationType::FULLY_CONNECTED;
+ model.operations[1].inputs = hidl_vec<uint32_t>{3, 4, 5, 6};
+ model.operations[1].outputs = hidl_vec<uint32_t>{7};
+
+ // unsupported
+ model.operations[2].type = OperationType::MUL;
+ model.operations[2].inputs = hidl_vec<uint32_t>{0,1};
+ model.operations[2].outputs = hidl_vec<uint32_t>{8};
+
+ // we are testing that the unsupported layers return false and the test continues
+ // rather than failing and stopping.
+ driver->getSupportedOperations(model, cb);
+ BOOST_TEST((int)error == (int)ErrorStatus::NONE);
+ BOOST_TEST(sup[0] == false);
+ BOOST_TEST(sup[1] == true);
+ BOOST_TEST(sup[2] == false);
+}
+
+// The purpose of this test is to ensure that when encountering an failure
+// during mem pool mapping we properly report an error to the framework via a callback
+BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
+{
+ auto driver = std::make_unique<ArmnnDriver>(armnn::Compute::CpuRef);
+
+ ErrorStatus error;
+ std::vector<bool> sup;
+
+ ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+ {
+ error = status;
+ sup = supported;
+ };
+
+ Model model = {};
+
+ model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
+
+ //memory pool mapping should fail, we should report an error
+ driver->getSupportedOperations(model, cb);
+ BOOST_TEST((int)error == (int)ErrorStatus::GENERAL_FAILURE);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/Merger.cpp b/test/Merger.cpp
new file mode 100644
index 00000000..6c069a86
--- /dev/null
+++ b/test/Merger.cpp
@@ -0,0 +1,408 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include "TestTensor.hpp"
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+
+BOOST_AUTO_TEST_SUITE(MergerTests)
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using namespace driverTestHelpers;
+
+namespace
+{
+
+void
+MergerTestImpl(const std::vector<const TestTensor*> & inputs,
+ int32_t concatAxis,
+ const TestTensor & expectedOutputTensor,
+ ErrorStatus expectedPrepareStatus=ErrorStatus::NONE,
+ ErrorStatus expectedExecStatus=ErrorStatus::NONE)
+{
+ std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
+ Model model{};
+
+ hidl_vec<uint32_t> modelInputIds;
+ modelInputIds.resize(inputs.size()+1);
+ for (uint32_t i = 0; i<inputs.size(); ++i)
+ {
+ modelInputIds[i] = i;
+ AddInputOperand(model, inputs[i]->GetDimensions());
+ }
+ modelInputIds[inputs.size()] = inputs.size(); // add an id for the axis too
+ AddIntOperand(model, concatAxis);
+ AddOutputOperand(model, expectedOutputTensor.GetDimensions());
+
+ // make the concat operation
+ model.operations.resize(1);
+ model.operations[0].type = OperationType::CONCATENATION;
+ model.operations[0].inputs = modelInputIds;
+ model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
+
+ // make the prepared model
+ ErrorStatus prepareStatus=ErrorStatus::NONE;
+ android::sp<IPreparedModel> preparedModel = PrepareModelWithStatus(model,
+ *driver,
+ prepareStatus,
+ expectedPrepareStatus);
+ BOOST_TEST(prepareStatus == expectedPrepareStatus);
+ if (prepareStatus != ErrorStatus::NONE)
+ {
+ // prepare failed, we cannot continue
+ return;
+ }
+
+ BOOST_TEST(preparedModel.get() != nullptr);
+ if (preparedModel.get() == nullptr)
+ {
+ // don't spoil other tests if prepare failed
+ return;
+ }
+
+ // construct the request
+ hidl_vec<RequestArgument> inputArguments;
+ hidl_vec<RequestArgument> outputArguments;
+ inputArguments.resize(inputs.size());
+ outputArguments.resize(1);
+
+ // the request's memory pools will follow the same order as
+ // the inputs
+ for (uint32_t i = 0; i<inputs.size(); ++i)
+ {
+ DataLocation inloc = {};
+ inloc.poolIndex = i;
+ inloc.offset = 0;
+ inloc.length = inputs[i]->GetNumElements() * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = inputs[i]->GetDimensions();
+ inputArguments[i] = input;
+ }
+
+ // and an additional memory pool is needed for the output
+ {
+ DataLocation outloc = {};
+ outloc.poolIndex = inputs.size();
+ outloc.offset = 0;
+ outloc.length = expectedOutputTensor.GetNumElements() * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = expectedOutputTensor.GetDimensions();
+ outputArguments[0] = output;
+ }
+
+ // make the request based on the arguments
+ Request request = {};
+ request.inputs = inputArguments;
+ request.outputs = outputArguments;
+
+ // set the input data
+ for (uint32_t i = 0; i<inputs.size(); ++i)
+ {
+ AddPoolAndSetData(inputs[i]->GetNumElements(),
+ request,
+ inputs[i]->GetData());
+ }
+
+ // add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(expectedOutputTensor.GetNumElements(), request);
+ float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+ // run the execution
+ auto execStatus = Execute(preparedModel, request, expectedExecStatus);
+ BOOST_TEST(execStatus == expectedExecStatus);
+
+ if (execStatus == ErrorStatus::NONE)
+ {
+ // check the result if there was no error
+ const float * expectedOutput = expectedOutputTensor.GetData();
+ for (unsigned int i=0; i<expectedOutputTensor.GetNumElements();++i)
+ {
+ BOOST_TEST(outdata[i] == expectedOutput[i]);
+ }
+ }
+}
+
+} // namespace <anonymous>
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis0)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{3,1,1,1},{0,1,2}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(ConcatAxis0_NoInterleave)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{2,1,2,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{3,1,2,1},{4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
+
+ TestTensor expected{armnn::TensorShape{6,1,2,1},{0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis1)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,3,1,1},{0,1,2}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(ConcatAxis1_NoInterleave)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{1,2,2,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,3,2,1},{4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
+
+ TestTensor expected{armnn::TensorShape{1,6,2,1},{0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis1_DoInterleave)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{2,2,1,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{2,3,1,1},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{2,1,1,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{2,6,1,1},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis2)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,1,3,1},{0,1,2}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(ConcatAxis2_NoInterleave)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,3,2},{4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,2},{10, 11}};
+
+ TestTensor expected{armnn::TensorShape{1,1,6,2},{0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis2_DoInterleave)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,2,2,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,2,3,1},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,2,1,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1,2,6,1},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis3)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,1,1,3},{0,1,2}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(SimpleConcatAxis3_DoInterleave)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_CASE(AxisTooBig)
+{
+ int32_t axis = 4;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+
+ // The axis must be within the range of [-rank(values), rank(values))
+ // see: https://www.tensorflow.org/api_docs/python/tf/concat
+ TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn, &bIn}, axis, uncheckedOutput, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(AxisTooSmall)
+{
+ int32_t axis = -5;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+
+ // The axis must be within the range of [-rank(values), rank(values))
+ // see: https://www.tensorflow.org/api_docs/python/tf/concat
+ TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn, &bIn}, axis, uncheckedOutput, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(TooFewInputs)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+
+ // We need at least two tensors to concatenate
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn}, axis, aIn, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(MismatchedInputDimensions)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor mismatched{armnn::TensorShape{1,1,1,1},{10}};
+
+ TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ // The input dimensions must be compatible
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn, &bIn, &mismatched}, axis, expected, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(MismatchedInputRanks)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,2},{0,1}};
+ TestTensor bIn{armnn::TensorShape{1,1},{4}};
+ TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
+
+    // The input tensor ranks must match
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn, &bIn}, axis, expected, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(MismatchedOutputDimensions)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor mismatched{armnn::TensorShape{1,1,6,2},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ // The input and output dimensions must be compatible
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(MismatchedOutputRank)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor mismatched{armnn::TensorShape{6,2},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ // The input and output ranks must match
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, expectedParserStatus);
+}
+
+BOOST_AUTO_TEST_CASE(ValidNegativeAxis)
+{
+    // for a rank-4 tensor, axis -1 is equivalent to axis 3
+    // see: https://www.tensorflow.org/api_docs/python/tf/concat
+ int32_t axis = -1;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ MergerTestImpl({&aIn, &bIn, &cIn}, axis, expected);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/SystemProperties.cpp b/test/SystemProperties.cpp
new file mode 100644
index 00000000..9bdf151e
--- /dev/null
+++ b/test/SystemProperties.cpp
@@ -0,0 +1,57 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "DriverTestHelpers.hpp"
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+#include "../SystemPropertiesUtils.hpp"
+
+BOOST_AUTO_TEST_SUITE(SystemProperiesTests)
+
+BOOST_AUTO_TEST_CASE(SystemProperties)
+{
+ // Test default value
+ {
+ auto p = __system_property_find("thisDoesNotExist");
+ BOOST_TEST((p == nullptr));
+
+ int defaultValue = ParseSystemProperty("thisDoesNotExist", -4);
+ BOOST_TEST((defaultValue == -4));
+ }
+
+ // Test default value from bad data type
+ {
+ __system_property_set("thisIsNotFloat", "notfloat");
+ float defaultValue = ParseSystemProperty("thisIsNotFloat", 0.1f);
+ BOOST_TEST((defaultValue == 0.1f));
+ }
+
+ // Test fetching bool values
+ {
+ __system_property_set("myTestBool", "1");
+ bool b = ParseSystemProperty("myTestBool", false);
+ BOOST_TEST((b == true));
+ }
+ {
+ __system_property_set("myTestBool", "0");
+ bool b = ParseSystemProperty("myTestBool", true);
+ BOOST_TEST((b == false));
+ }
+
+ // Test fetching int
+ {
+ __system_property_set("myTestInt", "567");
+ int i = ParseSystemProperty("myTestInt", 890);
+ BOOST_TEST((i==567));
+ }
+
+ // Test fetching float
+ {
+ __system_property_set("myTestFloat", "1.2f");
+ float f = ParseSystemProperty("myTestFloat", 3.4f);
+ BOOST_TEST((f==1.2f));
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/TestTensor.cpp b/test/TestTensor.cpp
new file mode 100644
index 00000000..0766ef50
--- /dev/null
+++ b/test/TestTensor.cpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "TestTensor.hpp"
+
+namespace driverTestHelpers
+{
+
+hidl_vec<uint32_t> TestTensor::GetDimensions() const
+{
+ hidl_vec<uint32_t> dimensions;
+ dimensions.resize(m_Shape.GetNumDimensions());
+ for (uint32_t i=0; i<m_Shape.GetNumDimensions(); ++i)
+ {
+ dimensions[i] = m_Shape[i];
+ }
+ return dimensions;
+}
+
+unsigned int TestTensor::GetNumElements() const
+{
+ return m_Shape.GetNumElements();
+}
+
+const float * TestTensor::GetData() const
+{
+ BOOST_ASSERT(m_Data.empty() == false);
+ return &m_Data[0];
+}
+
+} // namespace driverTestHelpers
diff --git a/test/TestTensor.hpp b/test/TestTensor.hpp
new file mode 100644
index 00000000..974e7b93
--- /dev/null
+++ b/test/TestTensor.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "../ArmnnDriver.hpp"
+
+namespace driverTestHelpers
+{
+
+class TestTensor
+{
+public:
+ TestTensor(const armnn::TensorShape & shape,
+ const std::vector<float> & data)
+ : m_Shape{shape}
+ , m_Data{data}
+ {
+ BOOST_ASSERT(m_Shape.GetNumElements() == m_Data.size());
+ }
+
+ hidl_vec<uint32_t> GetDimensions() const;
+ unsigned int GetNumElements() const;
+ const float * GetData() const;
+
+private:
+ armnn::TensorShape m_Shape;
+ std::vector<float> m_Data;
+};
+
+} // namespace driverTestHelpers
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 0ab2908b..37aece7c 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -2,43 +2,18 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
-
#define LOG_TAG "ArmnnDriverTests"
#define BOOST_TEST_MODULE armnn_driver_tests
#include <boost/test/unit_test.hpp>
#include <log/log.h>
-#include "../ArmnnDriver.hpp"
-#include "../SystemPropertiesUtils.hpp"
-
-#include "OperationsUtils.h"
-
-#include <condition_variable>
-
-namespace android
-{
-namespace hardware
-{
-namespace neuralnetworks
-{
-namespace V1_0
-{
-
-std::ostream& operator<<(std::ostream& os, ErrorStatus stat)
-{
- return os << static_cast<int>(stat);
-}
-
-}
-}
-}
-}
+#include "DriverTestHelpers.hpp"
BOOST_AUTO_TEST_SUITE(DriverTests)
-using namespace armnn_driver;
-using namespace android::nn;
-using namespace android;
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using namespace driverTestHelpers;
BOOST_AUTO_TEST_CASE(Init)
{
@@ -73,904 +48,4 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
BOOST_TEST(cap.quantized8Performance.powerUsage > 0.f);
}
-BOOST_AUTO_TEST_CASE(SystemProperties)
-{
- // Test default value
- {
- auto p = __system_property_find("thisDoesNotExist");
- BOOST_TEST((p == nullptr));
-
- int defaultValue = ParseSystemProperty("thisDoesNotExist", -4);
- BOOST_TEST((defaultValue == -4));
- }
-
- // Test default value from bad data type
- {
- __system_property_set("thisIsNotFloat", "notfloat");
- float defaultValue = ParseSystemProperty("thisIsNotFloat", 0.1f);
- BOOST_TEST((defaultValue == 0.1f));
- }
-
- // Test fetching bool values
- {
- __system_property_set("myTestBool", "1");
- bool b = ParseSystemProperty("myTestBool", false);
- BOOST_TEST((b == true));
- }
- {
- __system_property_set("myTestBool", "0");
- bool b = ParseSystemProperty("myTestBool", true);
- BOOST_TEST((b == false));
- }
-
- // Test fetching int
- {
- __system_property_set("myTestInt", "567");
- int i = ParseSystemProperty("myTestInt", 890);
- BOOST_TEST((i==567));
- }
-
- // Test fetching float
- {
- __system_property_set("myTestFloat", "1.2f");
- float f = ParseSystemProperty("myTestFloat", 3.4f);
- BOOST_TEST((f==1.2f));
- }
-}
-
-// The following are helpers for writing unit tests for the driver
-namespace
-{
-
-struct ExecutionCallback : public IExecutionCallback
-{
- ExecutionCallback()
- : mNotified(false)
- {
- }
-
- Return<void> notify(ErrorStatus status) override
- {
- (void)status;
- ALOGI("ExecutionCallback::notify invoked");
- std::lock_guard<std::mutex> executionLock(mMutex);
- mNotified = true;
- mCondition.notify_one();
- return Void();
- }
-
- /// wait until the callback has notified us that it is done
- Return<void> wait()
- {
- ALOGI("ExecutionCallback::wait invoked");
- std::unique_lock<std::mutex> executionLock(mMutex);
- while (!mNotified)
- {
- mCondition.wait(executionLock);
- }
- mNotified = false;
- return Void();
- }
-
-private:
- // use a mutex and a condition variable to wait for asynchronous callbacks
- std::mutex mMutex;
- std::condition_variable mCondition;
- // and a flag, in case we are notified before the wait call
- bool mNotified;
-};
-
-class PreparedModelCallback : public IPreparedModelCallback
-{
-public:
- PreparedModelCallback()
- {
- }
-
- ~PreparedModelCallback() override
- {
- }
-
- Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override
- {
- m_ErrorStatus = status;
- m_PreparedModel = preparedModel;
- return Void();
- }
-
- ErrorStatus GetErrorStatus()
- {
- return m_ErrorStatus;
- }
-
- sp<IPreparedModel> GetPreparedModel()
- {
- return m_PreparedModel;
- }
-
-
-private:
- ErrorStatus m_ErrorStatus;
- sp<IPreparedModel> m_PreparedModel;
-};
-
-// lifted from common/Utils.cpp
-hidl_memory allocateSharedMemory(int64_t size)
-{
- hidl_memory memory;
-
- const std::string& type = "ashmem";
- android::sp<IAllocator> allocator = IAllocator::getService(type);
- allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
- if (!success)
- {
- ALOGE("unable to allocate %li bytes of %s", size, type.c_str());
- }
- else
- {
- memory = mem;
- }
- });
-
- return memory;
-}
-
-
-android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request)
-{
- hidl_memory pool;
-
- android::sp<IAllocator> allocator = IAllocator::getService("ashmem");
- allocator->allocate(sizeof(float) * size, [&](bool success, const hidl_memory& mem) {
- BOOST_TEST(success);
- pool = mem;
- });
-
- request.pools.resize(request.pools.size() + 1);
- request.pools[request.pools.size() - 1] = pool;
-
- android::sp<IMemory> mapped = mapMemory(pool);
- mapped->update();
- return mapped;
-}
-
-void AddPoolAndSetData(uint32_t size, Request& request, float* data)
-{
- android::sp<IMemory> memory = AddPoolAndGetData(size, request);
-
- float* dst = static_cast<float*>(static_cast<void*>(memory->getPointer()));
-
- memcpy(dst, data, size * sizeof(float));
-}
-
-void AddOperand(Model& model, const Operand& op)
-{
- model.operands.resize(model.operands.size() + 1);
- model.operands[model.operands.size() - 1] = op;
-}
-
-void AddIntOperand(Model& model, int32_t value)
-{
- DataLocation location = {};
- location.offset = model.operandValues.size();
- location.length = sizeof(int32_t);
-
- Operand op = {};
- op.type = OperandType::INT32;
- op.dimensions = hidl_vec<uint32_t>{};
- op.lifetime = OperandLifeTime::CONSTANT_COPY;
- op.location = location;
-
- model.operandValues.resize(model.operandValues.size() + location.length);
- *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
-
- AddOperand(model, op);
-}
-
-template<typename T>
-OperandType TypeToOperandType();
-
-template<>
-OperandType TypeToOperandType<float>()
-{
- return OperandType::TENSOR_FLOAT32;
-};
-
-template<>
-OperandType TypeToOperandType<int32_t>()
-{
- return OperandType::TENSOR_INT32;
-};
-
-
-
-template<typename T>
-void AddTensorOperand(Model& model, hidl_vec<uint32_t> dimensions, T* values)
-{
- uint32_t totalElements = 1;
- for (uint32_t dim : dimensions)
- {
- totalElements *= dim;
- }
-
- DataLocation location = {};
- location.offset = model.operandValues.size();
- location.length = totalElements * sizeof(T);
-
- Operand op = {};
- op.type = TypeToOperandType<T>();
- op.dimensions = dimensions;
- op.lifetime = OperandLifeTime::CONSTANT_COPY;
- op.location = location;
-
- model.operandValues.resize(model.operandValues.size() + location.length);
- for (uint32_t i = 0; i < totalElements; i++)
- {
- *(reinterpret_cast<T*>(&model.operandValues[location.offset]) + i) = values[i];
- }
-
- AddOperand(model, op);
-}
-
-void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions)
-{
- Operand op = {};
- op.type = OperandType::TENSOR_FLOAT32;
- op.dimensions = dimensions;
- op.lifetime = OperandLifeTime::MODEL_INPUT;
-
- AddOperand(model, op);
-
- model.inputIndexes.resize(model.inputIndexes.size() + 1);
- model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
-}
-
-void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions)
-{
- Operand op = {};
- op.type = OperandType::TENSOR_FLOAT32;
- op.dimensions = dimensions;
- op.lifetime = OperandLifeTime::MODEL_OUTPUT;
-
- AddOperand(model, op);
-
- model.outputIndexes.resize(model.outputIndexes.size() + 1);
- model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
-}
-
-android::sp<IPreparedModel> PrepareModel(const Model& model, ArmnnDriver& driver)
-{
-
- sp<PreparedModelCallback> cb(new PreparedModelCallback());
- driver.prepareModel(model, cb);
-
- BOOST_TEST((cb->GetErrorStatus() == ErrorStatus::NONE));
- BOOST_TEST((cb->GetPreparedModel() != nullptr));
-
- return cb->GetPreparedModel();
-}
-
-void Execute(android::sp<IPreparedModel> preparedModel, const Request& request)
-{
- sp<ExecutionCallback> cb(new ExecutionCallback());
- BOOST_TEST(preparedModel->execute(request, cb) == ErrorStatus::NONE);
- ALOGI("Execute: waiting for callback to be invoked");
- cb->wait();
-}
-
-sp<ExecutionCallback> ExecuteNoWait(android::sp<IPreparedModel> preparedModel, const Request& request)
-{
- sp<ExecutionCallback> cb(new ExecutionCallback());
- BOOST_TEST(preparedModel->execute(request, cb) == ErrorStatus::NONE);
- ALOGI("ExecuteNoWait: returning callback object");
- return cb;
-}
-}
-
-// Add our own test here since we fail the fc tests which Google supplies (because of non-const weights)
-BOOST_AUTO_TEST_CASE(FullyConnected)
-{
- // this should ideally replicate fully_connected_float.model.cpp
- // but that uses slightly weird dimensions which I don't think we need to support for now
-
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model = {};
-
- // add operands
- int32_t actValue = 0;
- float weightValue[] = {2, 4, 1};
- float biasValue[] = {4};
-
- AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
-
- // make the fully connected operation
- model.operations.resize(1);
- model.operations[0].type = OperationType::FULLY_CONNECTED;
- model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
- model.operations[0].outputs = hidl_vec<uint32_t>{4};
-
- // make the prepared model
- android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
-
- // construct the request
- DataLocation inloc = {};
- inloc.poolIndex = 0;
- inloc.offset = 0;
- inloc.length = 3 * sizeof(float);
- RequestArgument input = {};
- input.location = inloc;
- input.dimensions = hidl_vec<uint32_t>{};
-
- DataLocation outloc = {};
- outloc.poolIndex = 1;
- outloc.offset = 0;
- outloc.length = 1 * sizeof(float);
- RequestArgument output = {};
- output.location = outloc;
- output.dimensions = hidl_vec<uint32_t>{};
-
- Request request = {};
- request.inputs = hidl_vec<RequestArgument>{input};
- request.outputs = hidl_vec<RequestArgument>{output};
-
- // set the input data (matching source test)
- float indata[] = {2, 32, 16};
- AddPoolAndSetData(3, request, indata);
-
- // add memory for the output
- android::sp<IMemory> outMemory = AddPoolAndGetData(1, request);
- float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
-
- // run the execution
- Execute(preparedModel, request);
-
- // check the result
- BOOST_TEST(outdata[0] == 152);
-}
-
-// Add our own test for concurrent execution
-// The main point of this test is to check that multiple requests can be
-// executed without waiting for the callback from previous execution.
-// The operations performed are not significant.
-BOOST_AUTO_TEST_CASE(ConcurrentExecute)
-{
- ALOGI("ConcurrentExecute: entry");
-
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model = {};
-
- // add operands
- int32_t actValue = 0;
- float weightValue[] = {2, 4, 1};
- float biasValue[] = {4};
-
- AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
-
- // make the fully connected operation
- model.operations.resize(1);
- model.operations[0].type = OperationType::FULLY_CONNECTED;
- model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
- model.operations[0].outputs = hidl_vec<uint32_t>{4};
-
- // make the prepared models
- const size_t maxRequests = 5;
- android::sp<IPreparedModel> preparedModels[maxRequests];
- for (size_t i = 0; i < maxRequests; ++i)
- {
- preparedModels[i] = PrepareModel(model, *driver);
- }
-
- // construct the request data
- DataLocation inloc = {};
- inloc.poolIndex = 0;
- inloc.offset = 0;
- inloc.length = 3 * sizeof(float);
- RequestArgument input = {};
- input.location = inloc;
- input.dimensions = hidl_vec<uint32_t>{};
-
- DataLocation outloc = {};
- outloc.poolIndex = 1;
- outloc.offset = 0;
- outloc.length = 1 * sizeof(float);
- RequestArgument output = {};
- output.location = outloc;
- output.dimensions = hidl_vec<uint32_t>{};
-
- // build the requests
- Request requests[maxRequests];
- android::sp<IMemory> outMemory[maxRequests];
- float* outdata[maxRequests];
- for (size_t i = 0; i < maxRequests; ++i)
- {
- requests[i].inputs = hidl_vec<RequestArgument>{input};
- requests[i].outputs = hidl_vec<RequestArgument>{output};
- // set the input data (matching source test)
- float indata[] = {2, 32, 16};
- AddPoolAndSetData(3, requests[i], indata);
- // add memory for the output
- outMemory[i] = AddPoolAndGetData(1, requests[i]);
- outdata[i] = static_cast<float*>(static_cast<void*>(outMemory[i]->getPointer()));
- }
-
- // invoke the execution of the requests
- ALOGI("ConcurrentExecute: executing requests");
- sp<ExecutionCallback> cb[maxRequests];
- for (size_t i = 0; i < maxRequests; ++i)
- {
- cb[i] = ExecuteNoWait(preparedModels[i], requests[i]);
- }
-
- // wait for the requests to complete
- ALOGI("ConcurrentExecute: waiting for callbacks");
- for (size_t i = 0; i < maxRequests; ++i)
- {
- cb[i]->wait();
- }
-
- // check the results
- ALOGI("ConcurrentExecute: validating results");
- for (size_t i = 0; i < maxRequests; ++i)
- {
- BOOST_TEST(outdata[i][0] == 152);
- }
- ALOGI("ConcurrentExecute: exit");
-}
-
-BOOST_AUTO_TEST_CASE(GetSupportedOperations)
-{
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-
- ErrorStatus error;
- std::vector<bool> sup;
-
- ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
- {
- error = status;
- sup = supported;
- };
-
- Model model1 = {};
-
- // add operands
- int32_t actValue = 0;
- float weightValue[] = {2, 4, 1};
- float biasValue[] = {4};
-
- AddInputOperand(model1, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model1, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model1, actValue);
- AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
-
- // make a correct fully connected operation
- model1.operations.resize(2);
- model1.operations[0].type = OperationType::FULLY_CONNECTED;
- model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
- model1.operations[0].outputs = hidl_vec<uint32_t>{4};
-
- // make an incorrect fully connected operation
- AddIntOperand(model1, actValue);
- AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
- model1.operations[1].type = OperationType::FULLY_CONNECTED;
- model1.operations[1].inputs = hidl_vec<uint32_t>{4};
- model1.operations[1].outputs = hidl_vec<uint32_t>{5};
-
- driver->getSupportedOperations(model1, cb);
- BOOST_TEST((int)error == (int)ErrorStatus::NONE);
- BOOST_TEST(sup[0] == true);
- BOOST_TEST(sup[1] == false);
-
- // Broadcast add/mul are not supported
- Model model2 = {};
-
- AddInputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddInputOperand(model2, hidl_vec<uint32_t>{4});
- AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
-
- model2.operations.resize(2);
-
- model2.operations[0].type = OperationType::ADD;
- model2.operations[0].inputs = hidl_vec<uint32_t>{0,1};
- model2.operations[0].outputs = hidl_vec<uint32_t>{2};
-
- model2.operations[1].type = OperationType::MUL;
- model2.operations[1].inputs = hidl_vec<uint32_t>{0,1};
- model2.operations[1].outputs = hidl_vec<uint32_t>{3};
-
- driver->getSupportedOperations(model2, cb);
- BOOST_TEST((int)error == (int)ErrorStatus::NONE);
- BOOST_TEST(sup[0] == false);
- BOOST_TEST(sup[1] == false);
-
- Model model3 = {};
-
- // Add unsupported operation, should return no error but we don't support it
- AddInputOperand(model3, hidl_vec<uint32_t>{1, 1, 1, 8});
- AddIntOperand(model3, 2);
- AddOutputOperand(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
- model3.operations.resize(1);
- model3.operations[0].type = OperationType::DEPTH_TO_SPACE;
- model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
- model3.operations[0].outputs = hidl_vec<uint32_t>{2};
-
- driver->getSupportedOperations(model3, cb);
- BOOST_TEST((int)error == (int)ErrorStatus::NONE);
- BOOST_TEST(sup[0] == false);
-
- // Add invalid operation
- Model model4 = {};
- AddIntOperand(model4, 0);
- model4.operations.resize(1);
- model4.operations[0].type = static_cast<OperationType>(100);
- model4.operations[0].outputs = hidl_vec<uint32_t>{0};
-
- driver->getSupportedOperations(model4, cb);
- BOOST_TEST((int)error == (int)ErrorStatus::INVALID_ARGUMENT);
-}
-
-// The purpose of this test is to ensure that when encountering an unsupported operation
-// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
-// As per IVGCVSW-710.
-BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
-{
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-
- ErrorStatus error;
- std::vector<bool> sup;
-
- ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
- {
- error = status;
- sup = supported;
- };
-
- Model model = {};
-
- // operands
- int32_t actValue = 0;
- float weightValue[] = {2, 4, 1};
- float biasValue[] = {4};
-
- // broadcast add is unsupported at the time of writing this test, but any unsupported layer will do
- AddInputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddInputOperand(model, hidl_vec<uint32_t>{4});
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
-
- // fully connected
- AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
-
- // broadcast mul is unsupported
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
-
- model.operations.resize(3);
-
- // unsupported
- model.operations[0].type = OperationType::ADD;
- model.operations[0].inputs = hidl_vec<uint32_t>{0,1};
- model.operations[0].outputs = hidl_vec<uint32_t>{2};
-
- // supported
- model.operations[1].type = OperationType::FULLY_CONNECTED;
- model.operations[1].inputs = hidl_vec<uint32_t>{3, 4, 5, 6};
- model.operations[1].outputs = hidl_vec<uint32_t>{7};
-
- // unsupported
- model.operations[2].type = OperationType::MUL;
- model.operations[2].inputs = hidl_vec<uint32_t>{0,1};
- model.operations[2].outputs = hidl_vec<uint32_t>{8};
-
- // we are testing that the unsupported layers return false and the test continues
- // rather than failing and stopping.
- driver->getSupportedOperations(model, cb);
- BOOST_TEST((int)error == (int)ErrorStatus::NONE);
- BOOST_TEST(sup[0] == false);
- BOOST_TEST(sup[1] == true);
- BOOST_TEST(sup[2] == false);
-}
-
-// The purpose of this test is to ensure that when encountering an failure
-// during mem pool mapping we properly report an error to the framework via a callback
-BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
-{
- auto driver = std::make_unique<ArmnnDriver>(armnn::Compute::CpuRef);
-
- ErrorStatus error;
- std::vector<bool> sup;
-
- ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
- {
- error = status;
- sup = supported;
- };
-
- Model model = {};
-
- model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
-
- //memory pool mapping should fail, we should report an error
- driver->getSupportedOperations(model, cb);
- BOOST_TEST((int)error == (int)ErrorStatus::GENERAL_FAILURE);
-}
-
-namespace
-{
-
-void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)
-{
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model = {};
-
- uint32_t outSize = paddingScheme == kPaddingSame ? 2 : 1;
-
- // add operands
- float weightValue[] = {1, -1, 0, 1};
- float biasValue[] = {0};
-
- AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 3, 1});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, (int32_t)paddingScheme); // padding
- AddIntOperand(model, 2); // stride x
- AddIntOperand(model, 2); // stride y
- AddIntOperand(model, 0); // no activation
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
-
- // make the convolution operation
- model.operations.resize(1);
- model.operations[0].type = OperationType::CONV_2D;
- model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
- model.operations[0].outputs = hidl_vec<uint32_t>{7};
-
- // make the prepared model
- android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
-
- // construct the request
- DataLocation inloc = {};
- inloc.poolIndex = 0;
- inloc.offset = 0;
- inloc.length = 6 * sizeof(float);
- RequestArgument input = {};
- input.location = inloc;
- input.dimensions = hidl_vec<uint32_t>{};
-
- DataLocation outloc = {};
- outloc.poolIndex = 1;
- outloc.offset = 0;
- outloc.length = outSize * sizeof(float);
- RequestArgument output = {};
- output.location = outloc;
- output.dimensions = hidl_vec<uint32_t>{};
-
- Request request = {};
- request.inputs = hidl_vec<RequestArgument>{input};
- request.outputs = hidl_vec<RequestArgument>{output};
-
-
- // set the input data (matching source test)
- float indata[] = {4, 1, 0, 3, -1, 2};
- AddPoolAndSetData(6, request, indata);
-
- // add memory for the output
- android::sp<IMemory> outMemory = AddPoolAndGetData(outSize, request);
- float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
-
- // run the execution
- Execute(preparedModel, request);
-
- // check the result
- if (paddingScheme == kPaddingValid)
- {
- BOOST_TEST(outdata[0] == 2);
- }
- else if (paddingScheme == kPaddingSame)
- {
- BOOST_TEST(outdata[0] == 2);
- BOOST_TEST(outdata[1] == 0);
- }
- else
- {
- BOOST_TEST(false);
- }
-}
-
-}
-
-BOOST_AUTO_TEST_CASE(ConvValidPadding)
-{
- PaddingTestImpl(kPaddingValid);
-}
-
-BOOST_AUTO_TEST_CASE(ConvSamePadding)
-{
- PaddingTestImpl(kPaddingSame);
-}
-
-BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
-{
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-
- ErrorStatus error;
- std::vector<bool> sup;
-
- ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
- {
- error = status;
- sup = supported;
- };
-
- Model model = {};
-
- // operands
- int32_t actValue = 0;
- float weightValue[] = {1, 0, 0, 0, 0, 0, 0, 0,
- 0, 1, 0, 0, 0, 0, 0, 0,
- 0, 0, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 0, 0, 1}; //identity
- float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0};
-
- // fully connected operation
- AddInputOperand(model, hidl_vec<uint32_t>{1, 1, 1, 8});
- AddTensorOperand(model, hidl_vec<uint32_t>{8, 8}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{8}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 8});
-
- model.operations.resize(1);
-
- model.operations[0].type = OperationType::FULLY_CONNECTED;
- model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
- model.operations[0].outputs = hidl_vec<uint32_t>{4};
-
- // make the prepared model
- android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
-
-
- // construct the request
- DataLocation inloc = {};
- inloc.poolIndex = 0;
- inloc.offset = 0;
- inloc.length = 8 * sizeof(float);
- RequestArgument input = {};
- input.location = inloc;
- input.dimensions = hidl_vec<uint32_t>{};
-
- DataLocation outloc = {};
- outloc.poolIndex = 1;
- outloc.offset = 0;
- outloc.length = 8 * sizeof(float);
- RequestArgument output = {};
- output.location = outloc;
- output.dimensions = hidl_vec<uint32_t>{};
-
- Request request = {};
- request.inputs = hidl_vec<RequestArgument>{input};
- request.outputs = hidl_vec<RequestArgument>{output};
-
- // set the input data
- float indata[] = {1,2,3,4,5,6,7,8};
- AddPoolAndSetData(8, request, indata);
-
- // add memory for the output
- android::sp<IMemory> outMemory = AddPoolAndGetData(8, request);
- float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
-
- // run the execution
- Execute(preparedModel, request);
-
- // check the result
- BOOST_TEST(outdata[0] == 1);
- BOOST_TEST(outdata[1] == 2);
- BOOST_TEST(outdata[2] == 3);
- BOOST_TEST(outdata[3] == 4);
- BOOST_TEST(outdata[4] == 5);
- BOOST_TEST(outdata[5] == 6);
- BOOST_TEST(outdata[6] == 7);
- BOOST_TEST(outdata[7] == 8);
-}
-
-BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
-{
- auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-
- ErrorStatus error;
- std::vector<bool> sup;
-
- ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
- {
- error = status;
- sup = supported;
- };
-
- Model model = {};
-
- // operands
- int32_t actValue = 0;
- float weightValue[] = {1, 0, 0, 0, 0, 0, 0, 0,
- 0, 1, 0, 0, 0, 0, 0, 0,
- 0, 0, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 0, 0, 1}; //identity
- float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0};
-
- // fully connected operation
- AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 2, 2});
- AddTensorOperand(model, hidl_vec<uint32_t>{8, 8}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{8}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 8});
-
- model.operations.resize(1);
-
- model.operations[0].type = OperationType::FULLY_CONNECTED;
- model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
- model.operations[0].outputs = hidl_vec<uint32_t>{4};
-
- // make the prepared model
- android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
-
-
- // construct the request
- DataLocation inloc = {};
- inloc.poolIndex = 0;
- inloc.offset = 0;
- inloc.length = 8 * sizeof(float);
- RequestArgument input = {};
- input.location = inloc;
- input.dimensions = hidl_vec<uint32_t>{};
-
- DataLocation outloc = {};
- outloc.poolIndex = 1;
- outloc.offset = 0;
- outloc.length = 8 * sizeof(float);
- RequestArgument output = {};
- output.location = outloc;
- output.dimensions = hidl_vec<uint32_t>{};
-
- Request request = {};
- request.inputs = hidl_vec<RequestArgument>{input};
- request.outputs = hidl_vec<RequestArgument>{output};
-
- // set the input data
- float indata[] = {1,2,3,4,5,6,7,8};
- AddPoolAndSetData(8, request, indata);
-
- // add memory for the output
- android::sp<IMemory> outMemory = AddPoolAndGetData(8, request);
- float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
-
- // run the execution
- Execute(preparedModel, request);
-
- // check the result
- BOOST_TEST(outdata[0] == 1);
- BOOST_TEST(outdata[1] == 2);
- BOOST_TEST(outdata[2] == 3);
- BOOST_TEST(outdata[3] == 4);
- BOOST_TEST(outdata[4] == 5);
- BOOST_TEST(outdata[5] == 6);
- BOOST_TEST(outdata[6] == 7);
- BOOST_TEST(outdata[7] == 8);
-}
-
BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index 49941e56..b429920c 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -3,12 +3,10 @@
// See LICENSE file in the project root for full license information.
//
-#define LOG_TAG "ArmnnDriverUtilsTests"
-//#define BOOST_TEST_MODULE armnn_driver_utils_tests
+#include "DriverTestHelpers.hpp"
#include <boost/test/unit_test.hpp>
#include <log/log.h>
-#include "../ArmnnDriver.hpp"
#include "../Utils.hpp"
#include <fstream>
@@ -59,7 +57,7 @@ public:
m_FileStream.close();
// Ignore any error (such as file not found).
- remove(m_FileName.c_str());
+ (void)remove(m_FileName.c_str());
}
bool FileExists()